repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
teonlamont/mne-python | mne/decoding/ems.py | 4 | 8295 | # Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..utils import logger, verbose
from ..parallel import parallel_func
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
"""Transformer to compute event-matched spatial filters.
This version of EMS [1]_ operates on the entire time course. No time
window needs to be specified. The result is a spatial filter at each
time point and a corresponding time course. Intuitively, the result
gives the similarity between the filter at each time point and the
data vector (sensors) at that time point.
.. note:: EMS only works for binary classification.
Attributes
----------
filters_ : ndarray, shape (n_channels, n_times)
The set of spatial filters.
classes_ : ndarray, shape (n_classes,)
The target classes.
References
----------
.. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
multi-sensor data to a single time course that reveals experimental
effects", BMC Neuroscience 2013, 14:122
"""
def __repr__(self): # noqa: D105
if hasattr(self, 'filters_'):
return '<EMS: fitted with %i filters on %i classes.>' % (
len(self.filters_), len(self.classes_))
else:
return '<EMS: not fitted.>'
def fit(self, X, y):
"""Fit the spatial filters.
.. note:: EMS is fitted on data normalized by channel type before the
fitting of the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The training data.
y : array of int, shape (n_epochs)
The target classes.
Returns
-------
self : returns an instance of self.
"""
classes = np.unique(y)
if len(classes) != 2:
raise ValueError('EMS only works for binary classification.')
self.classes_ = classes
filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
filters /= np.linalg.norm(filters, axis=0)[None, :]
self.filters_ = filters
return self
def transform(self, X):
"""Transform the data by the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The input data.
Returns
-------
X : array, shape (n_epochs, n_times)
The input data transformed by the spatial filters.
"""
Xt = np.sum(X * self.filters_, axis=1)
return Xt
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, verbose=None,
cv=None):
"""Compute event-matched spatial filter on epochs.
This version of EMS [1]_ operates on the entire time course. No time
window needs to be specified. The result is a spatial filter at each
time point and a corresponding time course. Intuitively, the result
gives the similarity between the filter at each time point and the
data vector (sensors) at that time point.
.. note:: EMS only works for binary classification.
.. note:: The present function applies a leave-one-out cross-validation,
following Schurger et al.'s paper. However, we recommend using
a stratified k-fold cross-validation. Indeed, leave-one-out tends
to overfit and cannot be used to estimate the variance of the
prediction within a given fold.
.. note:: Because of the leave-one-out, this function needs an equal
number of epochs in each of the two conditions.
Parameters
----------
epochs : instance of mne.Epochs
The epochs.
conditions : list of str | None, defaults to None
If a list of strings, strings must match the epochs.event_id's key as
well as the number of conditions supported by the objective_function.
If None keys in epochs.event_id are used.
picks : array-like of int | None, defaults to None
Channels to be included. If None only good data channels are used.
n_jobs : int, defaults to 1
Number of jobs to run in parallel.
verbose : bool, str, int, or None, defaults to self.verbose
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
cv : cross-validation object | str | None, defaults to LeaveOneOut
The cross-validation scheme.
Returns
-------
surrogate_trials : ndarray, shape (n_trials // 2, n_times)
The trial surrogates.
mean_spatial_filter : ndarray, shape (n_channels, n_times)
The set of spatial filters.
conditions : ndarray, shape (n_classes,)
The conditions used. Values correspond to original event ids.
References
----------
.. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing
multi-sensor data to a single time course that reveals experimental
effects", BMC Neuroscience 2013, 14:122
"""
logger.info('...computing surrogate time series. This can take some time')
# Default to leave-one-out cv
cv = 'LeaveOneOut' if cv is None else cv
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True)
if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
raise ValueError('This function requires the same number of epochs in '
'each of the two conditions. Please consider using '
'`epochs.equalize_event_counts`.')
if conditions is None:
conditions = epochs.event_id.keys()
epochs = epochs.copy()
else:
epochs = epochs[conditions]
epochs.drop_bad()
if len(conditions) != 2:
raise ValueError('Currently this function expects exactly 2 '
'conditions but you gave me %i' %
len(conditions))
ev = epochs.events[:, 2]
# Special care to avoid path dependent mappings and orders
conditions = list(sorted(conditions))
cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
info = pick_info(epochs.info, picks)
data = epochs.get_data()[:, picks]
# Scale (z-score) the data by channel type
# XXX the z-scoring is applied outside the CV, which is not standard.
for ch_type in ['mag', 'grad', 'eeg']:
if ch_type in epochs:
# FIXME should be applied to all sort of data channels
if ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
else:
this_picks = pick_types(info, meg=ch_type, eeg=False)
data[:, this_picks] /= np.std(data[:, this_picks])
# Setup cross-validation. Need to use _set_cv to deal with sklearn
# deprecation of cv objects.
y = epochs.events[:, 2]
_, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
# FIXME this parallelization should be removed.
# 1) it's numpy computation so it's already efficient,
# 2) it duplicates the data in RAM,
# 3) the computation is already super fast.
out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
for train, test in cv_splits)
surrogate_trials, spatial_filter = zip(*out)
surrogate_trials = np.array(surrogate_trials)
spatial_filter = np.mean(spatial_filter, axis=0)
return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
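# --- Usage sketch (added note, not part of the original module) ----------
# A minimal, hypothetical example of the EMS transformer on synthetic data;
# shapes follow the docstrings above and only numpy is assumed.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     n_epochs, n_channels, n_times = 40, 32, 100
#     X = rng.randn(n_epochs, n_channels, n_times)
#     y = np.repeat([0, 1], n_epochs // 2)
#     X[y == 1] += 0.5                       # inject a class difference
#     ems = EMS().fit(X[:30], y[:30])        # filters_: (n_channels, n_times)
#     surrogates = ems.transform(X[30:])     # shape (10, n_times)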
| bsd-3-clause |
benmoran56/esper | examples/benchmark_cache.py | 1 | 3589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import optparse
from dataclasses import dataclass as component
from esper import Processor, World
try:
from matplotlib import pyplot
except ImportError:
print("The matplotlib module is required for this benchmark.")
raise Exception
######################
# Commandline options:
######################
parser = optparse.OptionParser()
parser.add_option("-e", "--entities", dest="entities", action="store", default=5000, type="int",
help="Change the maximum number of Entities to benchmark. Default is 5000.")
(options, arguments) = parser.parse_args()
MAX_ENTITIES = options.entities
if MAX_ENTITIES <= 500:
print("The number of entities must be greater than 500.")
sys.exit(1)
##########################
# Simple timing decorator:
##########################
def timing(f):
def wrap(*args):
time1 = time.process_time()
ret = f(*args)
time2 = time.process_time()
current_run.append((time2 - time1) * 1000.0)
return ret
return wrap
##########################
# Create a World instance:
##########################
world = World()
#################################
# Define some generic components:
#################################
@component
class Velocity:
x: float = 0.0
y: float = 0.0
@component
class Position:
x: float = 0.0
y: float = 0.0
@component
class Health:
hp: int = 100
@component
class Command:
attack: bool = False
defend: bool = True
@component
class Projectile:
size: int = 10
lifespan: int = 100
@component
class Damageable:
defense: int = 45
@component
class Brain:
smarts: int = 9000
##########################
# Define some Processors:
##########################
class MovementProcessor(Processor):
def __init__(self):
super().__init__()
def process(self):
for ent, (vel, pos) in self.world.get_components(Velocity, Position):
pos.x += vel.x
pos.y += vel.y
print("Current Position: {}".format((int(pos.x), int(pos.y))))
#############################
# Set up some dummy entities:
#############################
def create_entities(world, number):
for _ in range(number // 2):
world.create_entity(Position(), Velocity(), Health(), Command())
world.create_entity(Position(), Health(), Damageable())
#################################################
# Perform several queries, and print the results:
#################################################
current_run = []
results = []
print("\nFor the first half of each pass, Entities are static.")
print("For the second half, Entities are created/deleted each frame.\n")
@timing
def query_entities(world):
for _, (_, _) in world.get_components(Position, Velocity):
pass
for _, (_, _, _) in world.get_components(Health, Damageable, Position):
pass
for current_pass in range(10):
world.clear_database()
create_entities(world, MAX_ENTITIES)
print(f"Pass {current_pass + 1}...")
for amount in range(1, 500):
query_entities(world)
if amount > 250:
world.delete_entity(amount, immediate=True)
create_entities(world, 1)
results.append(current_run)
current_run = []
# For each query index, keep the fastest (minimum) time recorded across passes
averaged_results = [sorted(e)[0] for e in zip(*results)]
pyplot.ylabel("Query time (ms)")
pyplot.xlabel("Query of {} entities".format(MAX_ENTITIES))
pyplot.plot(averaged_results, label="Average query time")
pyplot.legend(bbox_to_anchor=(0.5, 1))
pyplot.show()
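# Usage sketch (illustrative, not part of the original script): assuming it is
# saved as benchmark_cache.py with esper and matplotlib installed, e.g.
#
#     python benchmark_cache.py --entities 10000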
| mit |
ANNarchy/ANNarchy | examples/hybrid/Hybrid.py | 2 | 2865 | #
# ANNarchy - Hybrid network
#
# Simple example showing hybrid spike/rate-coded networks.
# Reproduces Fig.4 of (Vitay, Dinkelbach and Hamker, 2015)
#
# authors: Helge Uelo Dinkelbach, Julien Vitay
#
from ANNarchy import *
setup(dt=0.1)
# Rate-coded input neuron
input_neuron = Neuron(
parameters = "baseline = 0.0",
equations = "r = baseline"
)
# Rate-coded output neuron
simple_neuron = Neuron(
equations = "r = sum(exc)"
)
# Rate-coded population for input
pop1 = Population(geometry=1, neuron=input_neuron)
# Poisson Population to encode
pop2 = PoissonPopulation(geometry=1000, target="exc")
proj = Projection(pop1, pop2, 'exc').connect_all_to_all(weights=1.)
# Rate-coded population to decode
pop3 = Population(geometry=1000, neuron =simple_neuron)
proj = DecodingProjection(pop2, pop3, 'exc', window=10.0)
def diagonal(pre, post, weights):
"Simple connector pattern to progressively connect each post-synaptic neuron to a growing number of pre-synaptic neurons"
lil = CSR()
for rk_post in range(post.size):
lil.add(rk_post, range((rk_post+1)), [weights], [0] )
return lil
proj.connect_with_func(method=diagonal, weights=1.)
compile()
# Monitors
m1 = Monitor(pop1, 'r')
m2 = Monitor(pop2, 'spike')
m3 = Monitor(pop3, 'r')
# Simulate
duration = 250.
# 0 Hz
pop1.baseline = 0.0
simulate(duration)
# 10 Hz
pop1.baseline = 10.0
simulate(duration)
# 50 Hz
pop1.baseline = 50.0
simulate(duration)
# 100 Hz
pop1.baseline = 100.0
simulate(duration)
# Get recordings
data1 = m1.get()
data2 = m2.get()
data3 = m3.get()
# Raster plot of the spiking population
t, n = m2.raster_plot(data2['spike'])
# Normalized error of the decoded firing rate for each input rate
data_10 = data3['r'][int(1.0*duration/dt()):int(2*duration/dt()), :]
data_50 = data3['r'][int(2.0*duration/dt()):int(3*duration/dt()), :]
data_100 = data3['r'][int(3.0*duration/dt()):int(4*duration/dt()), :]
var_10 = np.mean(np.abs((data_10 - 10.)/10.), axis=0)
var_50 = np.mean(np.abs((data_50 - 50.)/50.), axis=0)
var_100 = np.mean(np.abs((data_100 - 100.)/100.), axis=0)
### Plot the results
import matplotlib.pyplot as plt
plt.subplot(3,1,1)
plt.plot(t, n, '.', markersize=0.5)
plt.title('a) Raster plot')
plt.xlabel('Time (ms)')
plt.ylabel('# neurons')
plt.xlim((0, 4*duration))
plt.subplot(3,1,2)
plt.plot(np.arange(0, 4*duration, 0.1), data1['r'][:, 0], label='Original firing rate')
plt.plot(np.arange(0, 4*duration, 0.1), data3['r'][:, 999], label='Decoded firing rate')
plt.legend(frameon=False, loc=2)
plt.title('b) Decoded firing rate')
plt.xlabel('Time (ms)')
plt.ylabel('Activity (Hz)')
plt.subplot(3,1,3)
plt.plot(var_10, label='10 Hz')
plt.plot(var_50, label='50 Hz')
plt.plot(var_100, label='100 Hz')
plt.legend(frameon=False)
plt.title('c) Precision')
plt.xlabel('# neurons used for decoding')
plt.ylabel('Normalized error')
plt.ylim((0,1))
plt.show()
| gpl-2.0 |
rubikloud/scikit-learn | sklearn/tests/test_pipeline.py | 14 | 15252 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
HumanCompatibleAI/imitation | src/imitation/scripts/analyze.py | 2 | 6347 | import logging
import os
import os.path as osp
import tempfile
from collections import OrderedDict
from typing import List, Optional
import pandas as pd
from sacred.observers import FileStorageObserver
import imitation.util.sacred as sacred_util
from imitation.scripts.config.analyze import analysis_ex
from imitation.util.sacred import dict_get_nested as get
@analysis_ex.command
def gather_tb_directories(
source_dir: str,
run_name: Optional[str],
env_name: Optional[str],
skip_failed_runs: bool,
) -> dict:
"""Gather Tensorboard directories from a `parallel_ex` run.
The directories are copied to a unique directory in `/tmp/analysis_tb/` under
subdirectories matching the Tensorboard events' Ray Tune trial names.
Undocumented arguments are the same as in `analyze_imitation()`.
Args:
source_dir: A local_dir for Ray. For example, `~/ray_results/`.
Returns:
A dict with two keys. "gather_dir" (str) is a path to a /tmp/
directory containing all the TensorBoard runs filtered from `source_dir`.
"n_tb_dirs" (int) is the number of TensorBoard directories that were
filtered.
"""
sacred_dicts = _get_sacred_dicts(source_dir, run_name, env_name, skip_failed_runs)
os.makedirs("/tmp/analysis_tb", exist_ok=True)
tmp_dir = tempfile.mkdtemp(dir="/tmp/analysis_tb/")
tb_dirs_count = 0
for sd in sacred_dicts:
# Expecting a path like "~/ray_results/{run_name}/sacred/1".
# Want to search for all Tensorboard dirs inside
# "~/ray_results/{run_name}".
sacred_dir = sd.sacred_dir.rstrip("/")
run_dir = osp.dirname(osp.dirname(sacred_dir))
run_name = osp.basename(run_dir)
# "tb" is TensorBoard directory built by our codebase. "sb_tb" is Stable
# Baselines TensorBoard directory. There should be at most one of each
# directory.
for basename in ["rl", "tb", "sb_tb"]:
tb_src_dirs = tuple(
sacred_util.filter_subdirs(
run_dir, lambda path: osp.basename(path) == basename
)
)
if tb_src_dirs:
assert len(tb_src_dirs) == 1, "expect at most one TB dir of each type"
tb_src_dir = tb_src_dirs[0]
symlinks_dir = osp.join(tmp_dir, basename)
os.makedirs(symlinks_dir, exist_ok=True)
tb_symlink = osp.join(symlinks_dir, run_name)
os.symlink(tb_src_dir, tb_symlink)
tb_dirs_count += 1
logging.info(f"Symlinked {tb_dirs_count} TensorBoard dirs to {tmp_dir}.")
logging.info(f"Start Tensorboard with `tensorboard --logdir {tmp_dir}`.")
return {"n_tb_dirs": tb_dirs_count, "gather_dir": tmp_dir}
@analysis_ex.command
def analyze_imitation(
source_dir: str,
run_name: Optional[str],
env_name: Optional[str],
skip_failed_runs: bool,
csv_output_path: Optional[str],
verbose: bool,
) -> pd.DataFrame:
"""Parse Sacred logs and generate a DataFrame for imitation learning results.
Args:
source_dir: A directory containing Sacred FileObserver subdirectories
associated with the `train_adversarial` Sacred script. Behavior is
undefined if there are Sacred subdirectories associated with other
scripts.
run_name: If provided, then only analyze results from Sacred directories
associated with this run name. `run_name` is compared against the
"experiment.name" key in `run.json`.
env_name: If provided, then only analyze results from Sacred directories
associated with this environment name. `env_name` is compared against
the "env_name" key in the Sacred config.
skip_failed_runs: If True, then filter out runs where the status is FAILED.
csv_output_path: If provided, then save a CSV output file to this path.
verbose: If True, then print the dataframe.
Returns:
A DataFrame with one row of analysis results per Sacred run.
"""
sacred_dicts = _get_sacred_dicts(source_dir, run_name, env_name, skip_failed_runs)
rows = []
for sd in sacred_dicts:
row = OrderedDict()
rows.append(row)
# Use get to prevent exceptions when reading in-progress experiments.
row["status"] = get(sd.run, "status")
row["use_gail"] = get(sd.config, "init_trainer_kwargs.use_gail")
row["env_name"] = get(sd.config, "env_name")
row["n_expert_demos"] = get(sd.config, "n_expert_demos")
row["run_name"] = get(sd.run, "experiment.name")
imit_stats = get(sd.run, "result.imit_stats")
expert_stats = get(sd.run, "result.expert_stats")
if imit_stats is not None and expert_stats is not None:
# Assume that `result.imit_stats` and `result.expert_stats` are
# formatted correctly.
row["expert_return_summary"] = _make_return_summary(expert_stats)
row["imit_return_summary"] = _make_return_summary(imit_stats, "monitor_")
row["imit_expert_ratio"] = (
imit_stats["monitor_return_mean"] / expert_stats["return_mean"]
)
df = pd.DataFrame(rows)
if csv_output_path is not None:
df.to_csv(csv_output_path)
if verbose:
print(df.to_string())
return df
def _make_return_summary(stats: dict, prefix="") -> str:
return "{:3g} ± {:3g} (n={})".format(
stats[f"{prefix}return_mean"], stats[f"{prefix}return_std"], stats["n_traj"]
)
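# Illustrative example with hypothetical values (not part of the original file):
#     _make_return_summary({"return_mean": 250.0, "return_std": 12.5, "n_traj": 10})
#     -> "250 ± 12.5 (n=10)"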
def _get_sacred_dicts(
source_dir: str, run_name: str, env_name: str, skip_failed_runs: bool
) -> List[sacred_util.SacredDicts]:
sacred_dirs = sacred_util.filter_subdirs(source_dir)
sacred_dicts = [
sacred_util.SacredDicts.load_from_dir(sacred_dir) for sacred_dir in sacred_dirs
]
if run_name is not None:
sacred_dicts = filter(
lambda sd: get(sd.run, "experiment.name") == run_name, sacred_dicts
)
if env_name is not None:
sacred_dicts = filter(
lambda sd: get(sd.config, "env_name") == env_name, sacred_dicts
)
if skip_failed_runs:
sacred_dicts = filter(
lambda sd: get(sd.run, "status") != "FAILED", sacred_dicts
)
return list(sacred_dicts)
def main_console():
observer = FileStorageObserver(osp.join("output", "sacred", "analyze"))
analysis_ex.observers.append(observer)
analysis_ex.run_commandline()
if __name__ == "__main__": # pragma: no cover
main_console()
| mit |
tian-zhou/InstrumentSegmentation | help/snake_python/morphsnakes.py | 1 | 11921 | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <[email protected]>"
from itertools import cycle
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_dilation, binary_erosion, \
gaussian_filter, gaussian_gradient_magnitude
class fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = self.funcs.next()
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0,1,0]]*3), np.flipud(np.eye(3)), np.rot90([[0,1,0]]*3)]
_P3 = [np.zeros((3,3,3)) for i in xrange(9)]
_P3[0][:,:,1] = 1
_P3[1][:,1,:] = 1
_P3[2][1,:,:] = 1
_P3[3][:,[0,1,2],[0,1,2]] = 1
_P3[4][:,[0,1,2],[2,1,0]] = 1
_P3[5][[0,1,2],:,[0,1,2]] = 1
_P3[6][[0,1,2],:,[2,1,0]] = 1
_P3[7][[0,1,2],[0,1,2],:] = 1
_P3[8][[0,1,2],[2,1,0],:] = 1
_aux = np.zeros((0))
def SI(u):
"""SI operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError, "u has an invalid number of dimensions (should be 2 or 3)"
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for i in xrange(len(P)):
_aux[i] = binary_erosion(u, P[i])
return _aux.max(0)
def IS(u):
"""IS operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError, "u has an invalid number of dimensions (should be 2 or 3)"
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for i in xrange(len(P)):
_aux[i] = binary_dilation(u, P[i])
return _aux.min(0)
# SIoIS operator.
SIoIS = lambda u: SI(IS(u))
ISoSI = lambda u: IS(SI(u))
curvop = fcycle([SIoIS, ISoSI])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
"""Stopping criterion for image borders."""
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm)
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError, "the levelset function is not set (use set_levelset)"
data = self.data
# Determine c0 and c1.
inside = u>0
outside = u<=0
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1)**2 - self.lambda2*(data - c0)**2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in xrange(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological Chan-Vese method."""
for i in xrange(iterations):
self.step()
class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution (the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter (ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError, "the levelset is not set (use set_levelset)"
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in xrange(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for i in xrange(iterations):
self.step()
def evolve_visual(msnake, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
from matplotlib import pyplot as ppl
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
fig = ppl.gcf()
fig.clf()
ax1 = fig.add_subplot(1,2,1)
if background is None:
ax1.imshow(msnake.data, cmap=ppl.cm.gray)
else:
ax1.imshow(background, cmap=ppl.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1,2,2)
ax_u = ax2.imshow(msnake.levelset)
ppl.pause(0.001)
# Iterate.
for i in xrange(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#ppl.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, levelset=None, num_iters=20):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
"""
from mayavi import mlab
import matplotlib.pyplot as ppl
if levelset is not None:
msnake.levelset = levelset
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=True)
def anim():
for i in xrange(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print "Iteration %s/%s..." % (i + 1, num_iters)
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset
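# --- Usage sketch (added note, not part of the original module) ----------
# The module docstring points to test.py for examples; as a minimal,
# hypothetical illustration, MorphACWE can be run on a synthetic image:
#
#     import numpy as np
#     img = np.zeros((100, 100))
#     img[30:70, 30:70] = 1.0
#     img += 0.2 * np.random.randn(100, 100)   # noisy bright square
#     macwe = MorphACWE(img, smoothing=1, lambda1=1, lambda2=1)
#     u0 = np.zeros_like(img)
#     u0[20:80, 20:80] = 1                      # initial level set (a box)
#     macwe.levelset = u0
#     macwe.run(50)                             # evolved level set in macwe.levelset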
| gpl-2.0 |
mfjb/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
ageek/confPyNotebooks | sklearn-scipy-2013/figures/svm_gui_frames.py | 2 | 2945 | """
Linear Model Example
--------------------
This is an example plot from the tutorial which accompanies an explanation
of the support vector machine GUI.
"""
import numpy as np
import pylab as pl
import matplotlib
from sklearn import svm
def linear_model(rseed=42, Npts=30):
np.random.seed(rseed)
data = np.random.normal(0, 10, (Npts, 2))
data[:Npts / 2] -= 15
data[Npts / 2:] += 15
labels = np.ones(Npts)
labels[:Npts / 2] = -1
return data, labels
def nonlinear_model(rseed=42, Npts=30):
radius = 40 * np.random.random(Npts)
far_pts = radius > 20
radius[far_pts] *= 1.2
radius[~far_pts] *= 1.1
theta = np.random.random(Npts) * np.pi * 2
data = np.empty((Npts, 2))
data[:, 0] = radius * np.cos(theta)
data[:, 1] = radius * np.sin(theta)
labels = np.ones(Npts)
labels[far_pts] = -1
return data, labels
def plot_linear_model():
X, y = linear_model()
clf = svm.SVC(kernel='linear',
gamma=0.01, coef0=0, degree=3)
clf.fit(X, y)
fig = pl.figure()
ax = pl.subplot(111, xticks=[], yticks=[])
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.bone)
ax.scatter(clf.support_vectors_[:, 0],
clf.support_vectors_[:, 1],
s=80, edgecolors="k", facecolors="none")
delta = 1
y_min, y_max = -50, 50
x_min, x_max = -50, 50
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles)
def plot_rbf_model():
X, y = nonlinear_model()
clf = svm.SVC(kernel='rbf',
gamma=0.001, coef0=0, degree=3)
clf.fit(X, y)
fig = pl.figure()
ax = pl.subplot(111, xticks=[], yticks=[])
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.bone, zorder=2)
ax.scatter(clf.support_vectors_[:, 0],
clf.support_vectors_[:, 1],
s=80, edgecolors="k", facecolors="none")
delta = 1
y_min, y_max = -50, 50
x_min, x_max = -50, 50
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower',
alpha=0.85, zorder=1)
ax.contour(X1, X2, Z, [0.0],
colors='k',
linestyles=['solid'], zorder=1)
if __name__ == '__main__':
plot_linear_model()
plot_rbf_model()
pl.show()
| gpl-2.0 |
00krishna-research/py_university_gender_dynamics_pkg | pyugend/BasicStochasticModelFixedPromotionDeptSizeStable.py | 1 | 13110 | __author__ = 'krishnab'
from operator import neg, truediv
import numpy as np
import pandas as pd
from numpy.random import binomial
from models.Models import Base_model
class Basic_stochastic_model_fixed_promotion(Base_model):
def __init__(self, **kwds):
Base_model.__init__(self, **kwds)
self.name = "Stochastic Model(sim_orig)"
self.label = "promote-hire fp"
def run_model(self):
## initialize data structure
self.res = np.zeros([self.duration, 12], dtype=np.float32)
self.res[0, 0] = self.nf1
self.res[0, 1] = self.nf2
self.res[0, 2] = self.nf3
self.res[0, 3] = self.nm1
self.res[0, 4] = self.nm2
self.res[0, 5] = self.nm3
self.res[0, 6] = self.vac3
self.res[0, 7] = self.vac2
self.res[0, 8] = self.vac1
self.res[0, 9] = self.female_promotion_probability_1
self.res[0, 10] = self.female_promotion_probability_2
self.res[0, 11] = np.float32(
sum(list([self.nf1, self.nf2, self.nf3])) / sum(list([self.nf1,
self.nf2,
self.nf3,
self.nm1,
self.nm2,
self.nm3])))
hiring_rate_female_level_1 = self.bf1
hiring_rate_female_level_2 = self.bf2
hiring_rate_female_level_3 = self.bf3
attrition_rate_female_level_1 = self.df1
attrition_rate_female_level_2 = self.df2
attrition_rate_female_level_3 = self.df3
attrition_rate_male_level_1 = self.dm1
attrition_rate_male_level_2 = self.dm2
attrition_rate_male_level_3 = self.dm3
probability_of_outside_hire_level_3 = self.phire3
probability_of_outside_hire_level_2 = self.phire2
male_promotion_probability_1_2 = self.male_promotion_probability_1
male_promotion_probability_2_3 = self.male_promotion_probability_2
for i in range(1, self.duration):
# initialize variables for this iteration
prev_number_of_females_level_1 = self.res[i - 1, 0]
prev_number_of_females_level_2 = self.res[i - 1, 1]
prev_number_of_females_level_3 = self.res[i - 1, 2]
prev_number_of_males_level_1 = self.res[i - 1, 3]
prev_number_of_males_level_2 = self.res[i - 1, 4]
prev_number_of_males_level_3 = self.res[i - 1, 5]
prev_number_of_vacancies_level_3 = self.res[i - 1, 6]
prev_number_of_vacancies_level_2 = self.res[i - 1, 7]
prev_number_of_vacancies_level_1 = self.res[i - 1, 8]
prev_promotion_rate_female_level_1 = self.female_promotion_probability_1
prev_promotion_rate_female_level_2 = self.female_promotion_probability_2
if np.isnan(prev_promotion_rate_female_level_1):
prev_promotion_rate_female_level_1 = 0
if np.isnan(prev_promotion_rate_female_level_2):
prev_promotion_rate_female_level_2 = 0
prev_gender_proportion_of_department = np.float32(
sum(list([prev_number_of_females_level_1,
prev_number_of_females_level_2,
prev_number_of_females_level_3])) / (
sum(list([prev_number_of_females_level_1,
prev_number_of_females_level_2,
prev_number_of_females_level_3,
prev_number_of_males_level_1,
prev_number_of_males_level_2,
prev_number_of_males_level_3]))))
# Process Model
# first both female and males leave the department according to binomial probability.
female_attrition_level_3 = binomial(prev_number_of_females_level_3,
attrition_rate_female_level_3)
male_attrition_level_3 = binomial(prev_number_of_males_level_3,
attrition_rate_male_level_3)
# the departures create a set of vacancies. These vacancies are the basis for new hiring
total_vacancies_3 = female_attrition_level_3 + male_attrition_level_3
# women are hired first and then men
hiring_female_3 = binomial(total_vacancies_3,
probability_of_outside_hire_level_3 * hiring_rate_female_level_3)
hiring_male_3 = binomial(max(0, total_vacancies_3 - hiring_female_3),
probability_of_outside_hire_level_3 * (
1 - hiring_rate_female_level_3))
total_hiring_3 = hiring_female_3 + hiring_male_3
# level 3 vacancies that are not filled by new hires create opportunities
# for promotion from level 2. Again women are promoted first and men second.
# Also note the error trap that if we try to promote more professors from
# level 2 than there exist at level 2, then we will prevent this from happening.
vacancies_remaining_after_hiring_3 = total_vacancies_3 - total_hiring_3
potential_promotions_after_hiring_3 = max(0,
vacancies_remaining_after_hiring_3)
promotions_of_females_level_2_3 = binomial(min(
potential_promotions_after_hiring_3,
prev_number_of_females_level_2),
prev_promotion_rate_female_level_2)
promotions_of_males_level_2_3 = binomial(max(0,min(
potential_promotions_after_hiring_3-promotions_of_females_level_2_3,
prev_number_of_males_level_2)), male_promotion_probability_2_3)
# attrition at level 2 - either people leave from attrition or promotion
female_attrition_level_2 = binomial(
max(0,
prev_number_of_females_level_2 - promotions_of_females_level_2_3),
attrition_rate_female_level_2)
male_attrition_level_2 = binomial(max(0,
prev_number_of_males_level_2 - promotions_of_males_level_2_3),
attrition_rate_male_level_2)
# the departures create a set of vacancies. These vacancies are the basis for new hiring
total_vacancies_2 = sum(list([female_attrition_level_2,
male_attrition_level_2,
promotions_of_females_level_2_3,
promotions_of_males_level_2_3]))
hiring_female_2 = binomial(max(0,total_vacancies_2),
probability_of_outside_hire_level_2 * hiring_rate_female_level_2)
hiring_male_2 = binomial(max(0,total_vacancies_2-hiring_female_2),
probability_of_outside_hire_level_2 * (1-hiring_rate_female_level_2))
total_hiring_2 = hiring_female_2 + hiring_male_2
vacancies_remaining_after_hiring_2 = total_vacancies_2 - total_hiring_2
potential_promotions_after_hiring_2 = max(0,
vacancies_remaining_after_hiring_2)
promotions_of_females_level_1_2 = binomial(max(0,
min(potential_promotions_after_hiring_2, prev_number_of_females_level_1)),
prev_promotion_rate_female_level_1)
promotions_of_males_level_1_2 = binomial(max(0,min(
potential_promotions_after_hiring_2 - promotions_of_females_level_1_2, prev_number_of_males_level_1)),
male_promotion_probability_1_2)
## Level 1
female_attrition_level_1 = binomial(max(0,prev_number_of_females_level_1-promotions_of_females_level_1_2),
attrition_rate_female_level_1)
male_attrition_level_1 = binomial(max(0,prev_number_of_males_level_1-promotions_of_males_level_1_2),
attrition_rate_male_level_1)
total_vacancies_1 = sum(list([female_attrition_level_1,
male_attrition_level_1,
promotions_of_females_level_1_2,
promotions_of_males_level_1_2]))
hiring_female_1 = binomial(max(0,total_vacancies_1),
hiring_rate_female_level_1)
hiring_male_1 = binomial(max(0,total_vacancies_1 - hiring_female_1),
1 - hiring_rate_female_level_1)
# Write state variables to array and move to next iteration
self.res[i, 0] = number_of_females_level_1 = sum(
list([prev_number_of_females_level_1,
neg(female_attrition_level_1),
neg(promotions_of_females_level_1_2),
hiring_female_1]))
assert (number_of_females_level_1 >= 0), "negative number of females 1"
self.res[i, 1] = number_of_females_level_2 = max(0, sum(
list([prev_number_of_females_level_2,
neg(female_attrition_level_2),
neg(promotions_of_females_level_2_3),
promotions_of_females_level_1_2,
hiring_female_2])))
self.res[i, 2] = number_of_females_level_3 = sum(list([
prev_number_of_females_level_3,
neg(female_attrition_level_3),
promotions_of_females_level_2_3,
hiring_female_3]))
self.res[i, 3] = number_of_males_level_1 = sum(list([
prev_number_of_males_level_1,
neg(male_attrition_level_1),
neg(promotions_of_males_level_1_2),
hiring_male_1]))
self.res[i, 4] = number_of_males_level_2 = sum(
list([prev_number_of_males_level_2,
neg(male_attrition_level_2),
neg(promotions_of_males_level_2_3),
promotions_of_males_level_1_2,
hiring_male_2]))
self.res[i, 5] = number_of_males_level_3 = sum(
list([prev_number_of_males_level_3,
neg(male_attrition_level_3),
promotions_of_males_level_2_3,
hiring_male_3]))
self.res[i, 6] = number_of_vacancies_level_3 = sum(list([
male_attrition_level_3,
female_attrition_level_3]))
self.res[i, 7] = number_of_vacancies_level_2 = sum(list([
male_attrition_level_2,
female_attrition_level_2,
promotions_of_females_level_2_3,
promotions_of_males_level_2_3]))
self.res[i, 8] = number_of_vacancies_level_1 = sum(list([
male_attrition_level_1,
female_attrition_level_1,
promotions_of_males_level_1_2,
promotions_of_females_level_1_2]))
self.res[i, 9] = promotion_rate_female_level_1 = np.float32(
number_of_females_level_1 / sum(list([number_of_females_level_1,
number_of_males_level_1])))
self.res[i, 10] = promotion_rate_women_level_2 = np.float32(
number_of_females_level_2 / sum(list([number_of_females_level_2,
number_of_males_level_2])))
self.res[i, 11] = gender_proportion_of_department = np.float32(
truediv(sum(list([number_of_females_level_1,
number_of_females_level_2,
number_of_females_level_3])), sum(list([
number_of_females_level_1,
number_of_females_level_2,
number_of_females_level_3,
number_of_males_level_1,
number_of_males_level_2,
number_of_males_level_3]))))
# print(self.res[i,:])
## Assemble the results matrix (printing left commented out)
df_ = pd.DataFrame(self.res)
df_.columns = ['f1',
'f2',
'f3',
'm1',
'm2',
'm3',
't3',
't2',
't1',
'prom1',
'prom2',
'gendprop']
# print(df_)
recarray_results = df_.to_records(index=True)
self.run = recarray_results
return recarray_results
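# Standalone sketch of the vacancy-filling step used at each level above
# (hypothetical counts and probabilities; not part of the model run itself):
# vacancies opened by attrition and outbound promotions are filled first by
# female outside hires, then by male outside hires from whatever remains.
from numpy.random import binomial

attrition_f, attrition_m = 2, 3            # departures at this level
promotions_out_f, promotions_out_m = 1, 1  # promotions to the level above
total_vacancies = attrition_f + attrition_m + promotions_out_f + promotions_out_m
p_outside_hire, p_female_hire = 0.6, 0.5
hires_f = binomial(max(0, total_vacancies), p_outside_hire * p_female_hire)
hires_m = binomial(max(0, total_vacancies - hires_f),
                   p_outside_hire * (1 - p_female_hire))
remaining_for_promotion = max(0, total_vacancies - hires_f - hires_m)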
| mit |
rrohan/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
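# Illustrative follow-up (not part of the original example): the pipeline's
# first step materializes exactly the Vandermonde-style matrix described in
# the module docstring, which can be inspected directly.
vander = PolynomialFeatures(degree=3).fit_transform(X[:3])
print(vander)  # rows are [1, x_i, x_i ** 2, x_i ** 3] for the first 3 samples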
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/measure_resolution/lmfit-py/tests/test_algebraic_constraint2.py | 4 | 3112 | from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
from lmfit import Parameters, Parameter, Minimizer
from lmfit.lineshapes import gaussian, lorentzian, pvoigt
from lmfit.printfuncs import report_fit
import sys
# Turn off plotting if run by nosetests.
WITHPLOT = True
for arg in sys.argv:
if 'nose' in arg:
WITHPLOT = False
if WITHPLOT:
try:
import matplotlib
import pylab
except ImportError:
WITHPLOT = False
def test_constraints(with_plot=True):
with_plot = with_plot and WITHPLOT
def residual(pars, x, sigma=None, data=None):
yg = gaussian(x, pars['amp_g'].value,
pars['cen_g'].value, pars['wid_g'].value)
yl = lorentzian(x, pars['amp_l'].value,
pars['cen_l'].value, pars['wid_l'].value)
slope = pars['line_slope'].value
offset = pars['line_off'].value
model = yg + yl + offset + x * slope
if data is None:
return model
if sigma is None:
return (model - data)
return (model - data) / sigma
n = 201
xmin = 0.
xmax = 20.0
x = linspace(xmin, xmax, n)
data = (gaussian(x, 21, 8.1, 1.2) +
lorentzian(x, 10, 9.6, 2.4) +
random.normal(scale=0.23, size=n) +
x*0.5)
if with_plot:
pylab.plot(x, data, 'r+')
pfit = Parameters()
pfit.add(name='amp_g', value=10)
pfit.add(name='cen_g', value=9)
pfit.add(name='wid_g', value=1)
pfit.add(name='amp_tot', value=20)
pfit.add(name='amp_l', expr='amp_tot - amp_g')
pfit.add(name='cen_l', expr='1.5+cen_g')
pfit.add(name='wid_l', expr='2*wid_g')
pfit.add(name='line_slope', value=0.0)
pfit.add(name='line_off', value=0.0)
sigma = 0.021 # estimate of data error (for all data points)
myfit = Minimizer(residual, pfit,
fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
scale_covar=True)
myfit.prepare_fit()
init = residual(myfit.params, x)
myfit.leastsq()
print(' Nfev = ', myfit.nfev)
print(myfit.chisqr, myfit.redchi, myfit.nfree)
report_fit(myfit.params, min_correl=0.3)
fit = residual(myfit.params, x)
if with_plot:
pylab.plot(x, fit, 'b-')
assert(myfit.params['cen_l'].value == 1.5 + myfit.params['cen_g'].value)
assert(myfit.params['amp_l'].value == myfit.params['amp_tot'].value - myfit.params['amp_g'].value)
assert(myfit.params['wid_l'].value == 2 * myfit.params['wid_g'].value)
# now, change fit slightly and re-run
myfit.params['wid_l'].expr = '1.25*wid_g'
myfit.leastsq()
report_fit(myfit.params, min_correl=0.4)
fit2 = residual(myfit.params, x)
if with_plot:
pylab.plot(x, fit2, 'k')
pylab.show()
assert(myfit.params['cen_l'].value == 1.5 + myfit.params['cen_g'].value)
assert(myfit.params['amp_l'].value == myfit.params['amp_tot'].value - myfit.params['amp_g'].value)
assert(myfit.params['wid_l'].value == 1.25 * myfit.params['wid_g'].value)
test_constraints()
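# Minimal standalone sketch of the expression-based constraints exercised above
# (assumed lmfit behavior: a parameter defined via ``expr`` is re-evaluated from
# the free parameters it depends on).
pars_sketch = Parameters()
pars_sketch.add('amp_tot', value=20)
pars_sketch.add('amp_g', value=15)
pars_sketch.add('amp_l', expr='amp_tot - amp_g')
print('constrained amp_l =', pars_sketch['amp_l'].value)  # expected: 5.0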
| apache-2.0 |
pravsripad/mne-python | mne/preprocessing/maxwell.py | 4 | 103431 | # -*- coding: utf-8 -*-
# Authors: Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
# Jussi Nurminen <[email protected]>
# License: BSD (3-clause)
from collections import Counter, OrderedDict
from functools import partial
from math import factorial
from os import path as op
import numpy as np
from .. import __version__
from ..annotations import _annotations_starts_stops
from ..bem import _check_origin
from ..transforms import (_str_to_frame, _get_trans, Transform, apply_trans,
_find_vector_rotation, _cart_to_sph, _get_n_moments,
_sph_to_cart_partials, _deg_ord_idx, _average_quats,
_sh_complex_to_real, _sh_real_to_complex, _sh_negate,
quat_to_rot, rot_to_quat)
from ..forward import _concatenate_coils, _prep_meg_channels, _create_meg_coils
from ..surface import _normalize_vectors
from ..io.constants import FIFF, FWD
from ..io.meas_info import _simplify_info, Info
from ..io.proc_history import _read_ctc
from ..io.write import _generate_meas_id, DATE_NONE
from ..io import (_loc_to_coil_trans, _coil_trans_to_loc, BaseRaw, RawArray,
Projection)
from ..io.pick import pick_types, pick_info
from ..utils import (verbose, logger, _clean_names, warn, _time_mask, _pl,
_check_option, _ensure_int, _validate_type, use_log_level)
from ..fixes import _safe_svd, einsum, bincount
from ..channels.channels import _get_T1T2_mag_inds, fix_mag_coil_types
# Note: MF uses single precision and some algorithms might use
# truncated versions of constants (e.g., μ0), which could lead to small
# differences between algorithms
# Changes to arguments here should also be made in find_bad_channels_maxwell
@verbose
def maxwell_filter(raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False, mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'),
extended_proj=(), verbose=None):
"""Maxwell filter data using multipole moments.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered.
.. warning:: It is critical to mark bad channels in
``raw.info['bads']`` prior to processing in order to
prevent artifact spreading. Manual inspection and use
of :func:`~find_bad_channels_maxwell` is recommended.
%(maxwell_origin)s
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_cal)s
%(maxwell_cross)s
st_duration : float | None
If not None, apply spatiotemporal SSS with specified buffer duration
(in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
Spatiotemporal SSS acts implicitly as a high-pass filter with a
cut-off frequency of 1/st_duration Hz. For this (and other) reasons,
longer buffers are generally better as long as your system can handle
the higher memory usage. To ensure that each window is processed
identically, choose a buffer length that divides evenly into your data.
Any data at the trailing edge that doesn't fit evenly into a whole
buffer window will be lumped into the previous buffer.
st_correlation : float
Correlation limit between inner and outer subspaces used to reject
intersecting inner/outer signals during spatiotemporal SSS.
%(maxwell_coord)s
%(maxwell_dest)s
%(maxwell_reg)s
%(maxwell_ref)s
%(maxwell_cond)s
%(maxwell_pos)s
.. versionadded:: 0.12
%(maxwell_st_fixed_only)s
%(maxwell_mag)s
.. versionadded:: 0.13
%(maxwell_skip)s
.. versionadded:: 0.17
%(maxwell_extended)s
%(verbose)s
Returns
-------
raw_sss : instance of mne.io.Raw
The raw data with Maxwell filtering applied.
See Also
--------
mne.preprocessing.annotate_flat
mne.preprocessing.find_bad_channels_maxwell
mne.chpi.filter_chpi
mne.chpi.read_head_pos
mne.epochs.average_movements
Notes
-----
.. versionadded:: 0.11
Some of this code was adapted and relicensed (with BSD form) with
permission from Jussi Nurminen. These algorithms are based on work
from :footcite:`TauluKajola2005` and :footcite:`TauluSimola2006`.
It will likely use multiple CPU cores, see the :ref:`FAQ <faq_cpu>`
for more information.
.. warning:: Maxwell filtering in MNE is not designed or certified
for clinical use.
Compared to the MEGIN MaxFilter™ software, the MNE Maxwell filtering
routines currently provide the following features:
.. table::
:widths: auto
+-----------------------------------------------------------------------------+-----+-----------+
| Feature | MNE | MaxFilter |
+=============================================================================+=====+===========+
| Maxwell filtering software shielding | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Bad channel reconstruction | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Cross-talk cancellation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (1D) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (3D) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Spatio-temporal SSS (tSSS) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Coordinate frame translation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Regularization using information theory | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (raw) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (:func:`epochs <mne.epochs.average_movements>`) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| :func:`cHPI subtraction <mne.chpi.filter_chpi>` | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Double floating point precision | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Seamless processing of split (``-1.fif``) and concatenated files | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Automatic bad channel detection (:func:`~find_bad_channels_maxwell`) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Head position estimation (:func:`~mne.chpi.compute_head_pos`) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Certified for clinical use | | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Extended external basis (eSSS) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
Epoch-based movement compensation is described in :footcite:`TauluKajola2005`.
Use of Maxwell filtering routines with non-Neuromag systems is currently
**experimental**. Worse results for non-Neuromag systems are expected due
to (at least):
* Missing fine-calibration and cross-talk cancellation data for
other systems.
* Processing with reference sensors has not been vetted.
* Regularization of components may not work well for all systems.
* Coil integration has not been optimized using Abramowitz/Stegun
definitions.
.. note:: Various Maxwell filtering algorithm components are covered by
patents owned by MEGIN. These patents include, but may not be
limited to:
- US2006031038 (Signal Space Separation)
- US6876196 (Head position determination)
- WO2005067789 (DC fields)
- WO2005078467 (MaxShield)
- WO2006114473 (Temporal Signal Space Separation)
These patents likely preclude the use of Maxwell filtering code
in commercial applications. Consult a lawyer if necessary.
Currently, in order to perform Maxwell filtering, the raw data must not
have any projectors applied. During Maxwell filtering, the spatial
structure of the data is modified, so projectors are discarded (unless
in ``st_only=True`` mode).
References
----------
.. footbibliography::
""" # noqa: E501
logger.info('Maxwell filtering raw data')
params = _prep_maxwell_filter(
raw=raw, origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, cross_talk=cross_talk,
st_duration=st_duration, st_correlation=st_correlation,
coord_frame=coord_frame, destination=destination,
regularize=regularize, ignore_ref=ignore_ref,
bad_condition=bad_condition, head_pos=head_pos, st_fixed=st_fixed,
st_only=st_only, mag_scale=mag_scale,
skip_by_annotation=skip_by_annotation, extended_proj=extended_proj)
raw_sss = _run_maxwell_filter(raw, **params)
# Update info
_update_sss_info(raw_sss, **params['update_kwargs'])
logger.info('[done]')
return raw_sss
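# Hedged usage sketch (comment only; the file names below are hypothetical and
# not shipped with this module):
#
#     import mne
#     raw = mne.io.read_raw_fif('sample_raw.fif')
#     raw.info['bads'] = ['MEG 2443']   # mark bad channels before filtering
#     raw_sss = mne.preprocessing.maxwell_filter(
#         raw, calibration='sss_cal.dat', cross_talk='ct_sparse.fif',
#         st_duration=10., coord_frame='head')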
@verbose
def _prep_maxwell_filter(
raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False,
mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'), extended_proj=(),
reconstruct='in', verbose=None):
# There are an absurd number of different possible notations for spherical
# coordinates, which confounds the notation for spherical harmonics. Here,
# we purposefully stay away from shorthand notation in both and use
# explicit terms (like 'azimuth' and 'polar') to avoid confusion.
# See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
# Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
# triage inputs ASAP to avoid late-thrown errors
_validate_type(raw, BaseRaw, 'raw')
_check_usable(raw)
_check_regularize(regularize)
st_correlation = float(st_correlation)
if st_correlation <= 0. or st_correlation > 1.:
raise ValueError('Need 0 < st_correlation <= 1., got %s'
% st_correlation)
_check_option('coord_frame', coord_frame, ['head', 'meg'])
head_frame = True if coord_frame == 'head' else False
recon_trans = _check_destination(destination, raw.info, head_frame)
if st_duration is not None:
st_duration = float(st_duration)
st_correlation = float(st_correlation)
st_duration = int(round(st_duration * raw.info['sfreq']))
if not 0. < st_correlation <= 1:
raise ValueError('st_correlation must be between 0. and 1.')
_check_option('bad_condition', bad_condition,
['error', 'warning', 'ignore', 'info'])
if raw.info['dev_head_t'] is None and coord_frame == 'head':
raise RuntimeError('coord_frame cannot be "head" because '
'info["dev_head_t"] is None; if this is an '
'empty room recording, consider using '
'coord_frame="meg"')
if st_only and st_duration is None:
raise ValueError('st_duration must not be None if st_only is True')
head_pos = _check_pos(head_pos, head_frame, raw, st_fixed,
raw.info['sfreq'])
_check_info(raw.info, sss=not st_only, tsss=st_duration is not None,
calibration=not st_only and calibration is not None,
ctc=not st_only and cross_talk is not None)
# Now we can actually get moving
info = raw.info.copy()
meg_picks, mag_picks, grad_picks, good_mask, mag_or_fine = \
_get_mf_picks_fix_mags(info, int_order, ext_order, ignore_ref)
# Magnetometers are scaled to improve numerical stability
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info)
#
# Extended projection vectors
#
_validate_type(extended_proj, (list, tuple), 'extended_proj')
good_names = [info['ch_names'][c] for c in meg_picks[good_mask]]
if len(extended_proj) > 0:
extended_proj_ = list()
for pi, proj in enumerate(extended_proj):
item = 'extended_proj[%d]' % (pi,)
_validate_type(proj, Projection, item)
got_names = proj['data']['col_names']
missing = sorted(set(good_names) - set(got_names))
if missing:
raise ValueError('%s channel names were missing some '
'good MEG channel names:\n%s'
% (item, ', '.join(missing)))
idx = [got_names.index(name) for name in good_names]
extended_proj_.append(proj['data']['data'][:, idx])
extended_proj = np.concatenate(extended_proj_)
logger.info(' Extending external SSS basis using %d projection '
'vectors' % (len(extended_proj),))
#
# Fine calibration processing (load fine cal and overwrite sensor geometry)
#
sss_cal = dict()
if calibration is not None:
calibration, sss_cal = _update_sensor_geometry(
info, calibration, ignore_ref)
mag_or_fine.fill(True) # all channels now have some mag-type data
# Determine/check the origin of the expansion
origin = _check_origin(origin, info, coord_frame, disp=True)
# Convert to the head frame
if coord_frame == 'meg' and info['dev_head_t'] is not None:
origin_head = apply_trans(info['dev_head_t'], origin)
else:
origin_head = origin
update_kwargs = dict(
origin=origin, coord_frame=coord_frame, sss_cal=sss_cal,
int_order=int_order, ext_order=ext_order,
extended_proj=extended_proj)
del origin, coord_frame, sss_cal
origin_head.setflags(write=False)
#
# Cross-talk processing
#
meg_ch_names = [info['ch_names'][p] for p in meg_picks]
ctc, sss_ctc = _read_cross_talk(cross_talk, meg_ch_names)
update_kwargs['sss_ctc'] = sss_ctc
del sss_ctc
#
# Translate to destination frame (always use non-fine-cal bases)
#
exp = dict(origin=origin_head, int_order=int_order, ext_order=0)
all_coils = _prep_mf_coils(info, ignore_ref)
S_recon = _trans_sss_basis(exp, all_coils, recon_trans, coil_scale)
exp['ext_order'] = ext_order
exp['extended_proj'] = extended_proj
del extended_proj
# Reconstruct data from internal space only (Eq. 38), and rescale S_recon
S_recon /= coil_scale
if recon_trans is not None:
# warn if we have translated too far
diff = 1000 * (info['dev_head_t']['trans'][:3, 3] -
recon_trans['trans'][:3, 3])
dist = np.sqrt(np.sum(_sq(diff)))
if dist > 25.:
warn('Head position change is over 25 mm (%s) = %0.1f mm'
% (', '.join('%0.1f' % x for x in diff), dist))
# Reconstruct raw file object with spatiotemporal processed data
max_st = dict()
if st_duration is not None:
if st_only:
job = FIFF.FIFFV_SSS_JOB_TPROJ
else:
job = FIFF.FIFFV_SSS_JOB_ST
max_st.update(job=job, subspcorr=st_correlation,
buflen=st_duration / info['sfreq'])
logger.info(' Processing data using tSSS with st_duration=%s'
% max_st['buflen'])
st_when = 'before' if st_fixed else 'after' # relative to movecomp
else:
# st_duration from here on will act like the chunk size
st_duration = min(max(int(round(10. * info['sfreq'])), 1),
len(raw.times))
st_correlation = None
st_when = 'never'
update_kwargs['max_st'] = max_st
del st_fixed, max_st
# Figure out which transforms we need for each tSSS block
# (and transform pos[1] to times)
head_pos[1] = raw.time_as_index(head_pos[1], use_rounding=True)
# Compute the first bit of pos_data for cHPI reporting
if info['dev_head_t'] is not None and head_pos[0] is not None:
this_pos_quat = np.concatenate([
rot_to_quat(info['dev_head_t']['trans'][:3, :3]),
info['dev_head_t']['trans'][:3, 3],
np.zeros(3)])
else:
this_pos_quat = None
_get_this_decomp_trans = partial(
_get_decomp, all_coils=all_coils,
cal=calibration, regularize=regularize,
exp=exp, ignore_ref=ignore_ref, coil_scale=coil_scale,
grad_picks=grad_picks, mag_picks=mag_picks, good_mask=good_mask,
mag_or_fine=mag_or_fine, bad_condition=bad_condition,
mag_scale=mag_scale)
update_kwargs.update(
nchan=good_mask.sum(), st_only=st_only, recon_trans=recon_trans)
params = dict(
skip_by_annotation=skip_by_annotation,
st_duration=st_duration, st_correlation=st_correlation,
st_only=st_only, st_when=st_when, ctc=ctc, coil_scale=coil_scale,
this_pos_quat=this_pos_quat, meg_picks=meg_picks,
good_mask=good_mask, grad_picks=grad_picks, head_pos=head_pos,
info=info, _get_this_decomp_trans=_get_this_decomp_trans,
S_recon=S_recon, update_kwargs=update_kwargs)
return params
def _run_maxwell_filter(
raw, skip_by_annotation, st_duration, st_correlation, st_only,
st_when, ctc, coil_scale, this_pos_quat, meg_picks, good_mask,
grad_picks, head_pos, info, _get_this_decomp_trans, S_recon,
update_kwargs,
reconstruct='in', copy=True):
# Eventually find_bad_channels_maxwell could be sped up by moving this
# outside the loop (e.g., in the prep function) but regularization depends
# on which channels are being used, so easier just to include it here.
# The time it takes to recompute S and pS themselves is roughly on par
# with the np.dot with the data, so not a huge gain to be made there.
S_decomp, S_decomp_full, pS_decomp, reg_moments, n_use_in = \
_get_this_decomp_trans(info['dev_head_t'], t=0.)
update_kwargs.update(reg_moments=reg_moments.copy())
if ctc is not None:
ctc = ctc[good_mask][:, good_mask]
add_channels = (head_pos[0] is not None) and (not st_only) and copy
raw_sss, pos_picks = _copy_preload_add_channels(
raw, add_channels, copy, info)
sfreq = info['sfreq']
del raw
if not st_only:
# remove MEG projectors, they won't apply now
_remove_meg_projs(raw_sss)
# Figure out which segments of data we can use
onsets, ends = _annotations_starts_stops(
raw_sss, skip_by_annotation, invert=True)
max_samps = (ends - onsets).max()
if not 0. < st_duration <= max_samps + 1.:
raise ValueError('st_duration (%0.1fs) must be between 0 and the '
'longest contiguous duration of the data '
'(%0.1fs).' % (st_duration / sfreq,
max_samps / sfreq))
# Generate time points to break up data into equal-length windows
starts, stops = list(), list()
for onset, end in zip(onsets, ends):
read_lims = np.arange(onset, end + 1, st_duration)
if len(read_lims) == 1:
read_lims = np.concatenate([read_lims, [end]])
if read_lims[-1] != end:
read_lims[-1] = end
# fold it into the previous buffer
n_last_buf = read_lims[-1] - read_lims[-2]
if st_correlation is not None and len(read_lims) > 2:
if n_last_buf >= st_duration:
logger.info(
' Spatiotemporal window did not fit evenly into '
'contiguous data segment. %0.2f seconds were lumped '
'into the previous window.'
% ((n_last_buf - st_duration) / sfreq,))
else:
logger.info(
' Contiguous data segment of duration %0.2f '
'seconds is too short to be processed with tSSS '
'using duration %0.2f'
% (n_last_buf / sfreq, st_duration / sfreq))
assert len(read_lims) >= 2
assert read_lims[0] == onset and read_lims[-1] == end
starts.extend(read_lims[:-1])
stops.extend(read_lims[1:])
del read_lims
st_duration = min(max_samps, st_duration)
# Loop through buffer windows of data
n_sig = int(np.floor(np.log10(max(len(starts), 0)))) + 1
logger.info(
' Processing %s data chunk%s' % (len(starts), _pl(starts)))
for ii, (start, stop) in enumerate(zip(starts, stops)):
if start == stop:
continue # Skip zero-length annotations
tsss_valid = (stop - start) >= st_duration
rel_times = raw_sss.times[start:stop]
t_str = '%8.3f - %8.3f sec' % tuple(rel_times[[0, -1]])
t_str += ('(#%d/%d)' % (ii + 1, len(starts))).rjust(2 * n_sig + 5)
# Get original data
orig_data = raw_sss._data[meg_picks[good_mask], start:stop]
# This could just be np.empty if not st_only, but shouldn't be slow
# this way so might as well just always take the original data
out_meg_data = raw_sss._data[meg_picks, start:stop]
# Apply cross-talk correction
if ctc is not None:
orig_data = ctc.dot(orig_data)
out_pos_data = np.empty((len(pos_picks), stop - start))
# Figure out which positions to use
t_s_s_q_a = _trans_starts_stops_quats(head_pos, start, stop,
this_pos_quat)
n_positions = len(t_s_s_q_a[0])
# Set up post-tSSS or do pre-tSSS
if st_correlation is not None:
# If doing tSSS before movecomp...
resid = orig_data.copy() # to be safe let's operate on a copy
if st_when == 'after':
orig_in_data = np.empty((len(meg_picks), stop - start))
else: # 'before'
avg_trans = t_s_s_q_a[-1]
if avg_trans is not None:
# if doing movecomp
S_decomp_st, _, pS_decomp_st, _, n_use_in_st = \
_get_this_decomp_trans(avg_trans, t=rel_times[0])
else:
S_decomp_st, pS_decomp_st = S_decomp, pS_decomp
n_use_in_st = n_use_in
orig_in_data = np.dot(np.dot(S_decomp_st[:, :n_use_in_st],
pS_decomp_st[:n_use_in_st]),
resid)
resid -= np.dot(np.dot(S_decomp_st[:, n_use_in_st:],
pS_decomp_st[n_use_in_st:]), resid)
resid -= orig_in_data
# Here we operate on our actual data
proc = out_meg_data if st_only else orig_data
_do_tSSS(proc, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
if not st_only or st_when == 'after':
# Do movement compensation on the data
for trans, rel_start, rel_stop, this_pos_quat in \
zip(*t_s_s_q_a[:4]):
# Recalculate bases if necessary (trans will be None iff the
# first position in this interval is the same as last of the
# previous interval)
if trans is not None:
S_decomp, S_decomp_full, pS_decomp, reg_moments, \
n_use_in = _get_this_decomp_trans(
trans, t=rel_times[rel_start])
# Determine multipole moments for this interval
mm_in = np.dot(pS_decomp[:n_use_in],
orig_data[:, rel_start:rel_stop])
# Our output data
if not st_only:
if reconstruct == 'in':
proj = S_recon.take(reg_moments[:n_use_in], axis=1)
mult = mm_in
else:
assert reconstruct == 'orig'
proj = S_decomp_full # already picked reg
mm_out = np.dot(pS_decomp[n_use_in:],
orig_data[:, rel_start:rel_stop])
mult = np.concatenate((mm_in, mm_out))
out_meg_data[:, rel_start:rel_stop] = \
np.dot(proj, mult)
if len(pos_picks) > 0:
out_pos_data[:, rel_start:rel_stop] = \
this_pos_quat[:, np.newaxis]
# Transform orig_data to store just the residual
if st_when == 'after':
# Reconstruct data using original location from external
# and internal spaces and compute residual
rel_resid_data = resid[:, rel_start:rel_stop]
orig_in_data[:, rel_start:rel_stop] = \
np.dot(S_decomp[:, :n_use_in], mm_in)
rel_resid_data -= np.dot(np.dot(S_decomp[:, n_use_in:],
pS_decomp[n_use_in:]),
rel_resid_data)
rel_resid_data -= orig_in_data[:, rel_start:rel_stop]
# If doing tSSS at the end
if st_when == 'after':
_do_tSSS(out_meg_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
elif st_when == 'never' and head_pos[0] is not None:
logger.info(' Used % 2d head position%s for %s'
% (n_positions, _pl(n_positions), t_str))
raw_sss._data[meg_picks, start:stop] = out_meg_data
raw_sss._data[pos_picks, start:stop] = out_pos_data
return raw_sss
def _get_coil_scale(meg_picks, mag_picks, grad_picks, mag_scale, info):
"""Get the magnetometer scale factor."""
if isinstance(mag_scale, str):
if mag_scale != 'auto':
raise ValueError('mag_scale must be a float or "auto", got "%s"'
% mag_scale)
if len(mag_picks) in (0, len(meg_picks)):
mag_scale = 100. # only one coil type, doesn't matter
logger.info(' Setting mag_scale=%0.2f because only one '
'coil type is present' % mag_scale)
else:
# Find our physical distance between gradiometer pickup loops
# ("base line")
coils = _create_meg_coils([info['chs'][pick]
for pick in meg_picks], 'accurate')
grad_base = {coils[pick]['base'] for pick in grad_picks}
if len(grad_base) != 1 or list(grad_base)[0] <= 0:
raise RuntimeError('Could not automatically determine '
'mag_scale, could not find one '
'proper gradiometer distance from: %s'
% list(grad_base))
grad_base = list(grad_base)[0]
mag_scale = 1. / grad_base
logger.info(' Setting mag_scale=%0.2f based on gradiometer '
'distance %0.2f mm' % (mag_scale, 1000 * grad_base))
mag_scale = float(mag_scale)
coil_scale = np.ones((len(meg_picks), 1))
coil_scale[mag_picks] = mag_scale
return coil_scale, mag_scale
def _remove_meg_projs(inst):
"""Remove inplace existing MEG projectors (assumes inactive)."""
meg_picks = pick_types(inst.info, meg=True, exclude=[])
meg_channels = [inst.ch_names[pi] for pi in meg_picks]
non_meg_proj = list()
for proj in inst.info['projs']:
if not any(c in meg_channels for c in proj['data']['col_names']):
non_meg_proj.append(proj)
inst.add_proj(non_meg_proj, remove_existing=True, verbose=False)
def _check_destination(destination, info, head_frame):
"""Triage our reconstruction trans."""
if destination is None:
return info['dev_head_t']
if not head_frame:
raise RuntimeError('destination can only be set if using the '
'head coordinate frame')
if isinstance(destination, str):
recon_trans = _get_trans(destination, 'meg', 'head')[0]
elif isinstance(destination, Transform):
recon_trans = destination
else:
destination = np.array(destination, float)
if destination.shape != (3,):
raise ValueError('destination must be a 3-element vector, '
'str, or None')
recon_trans = np.eye(4)
recon_trans[:3, 3] = destination
recon_trans = Transform('meg', 'head', recon_trans)
if recon_trans.to_str != 'head' or recon_trans.from_str != 'MEG device':
raise RuntimeError('Destination transform is not MEG device -> head, '
'got %s -> %s' % (recon_trans.from_str,
recon_trans.to_str))
return recon_trans
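# Illustrative sketch (hypothetical values) of the destination forms triaged
# above: a 3-vector becomes a pure translation in head coordinates, while an
# explicit Transform is used as-is.
_example_destination = np.array([0., 0., 0.04])  # 4 cm above the device origin
_example_trans = np.eye(4)
_example_trans[:3, 3] = _example_destination
_example_transform = Transform('meg', 'head', _example_trans)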
@verbose
def _prep_mf_coils(info, ignore_ref=True, verbose=None):
"""Get all coil integration information loaded and sorted."""
coils, comp_coils = _prep_meg_channels(
info, accurate=True, head_frame=False,
ignore_ref=ignore_ref, do_picking=False, verbose=False)[:2]
mag_mask = _get_mag_mask(coils)
if len(comp_coils) > 0:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=[])
ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=[])
inserts = np.searchsorted(meg_picks, ref_picks)
# len(inserts) == len(comp_coils)
for idx, comp_coil in zip(inserts[::-1], comp_coils[::-1]):
coils.insert(idx, comp_coil)
# Now we have:
# [c['chname'] for c in coils] ==
# [info['ch_names'][ii]
# for ii in pick_types(info, meg=True, ref_meg=True)]
# Now coils is a sorted list of coils. Time to do some vectorization.
n_coils = len(coils)
rmags = np.concatenate([coil['rmag'] for coil in coils])
cosmags = np.concatenate([coil['cosmag'] for coil in coils])
ws = np.concatenate([coil['w'] for coil in coils])
cosmags *= ws[:, np.newaxis]
del ws
n_int = np.array([len(coil['rmag']) for coil in coils])
bins = np.repeat(np.arange(len(n_int)), n_int)
bd = np.concatenate(([0], np.cumsum(n_int)))
slice_map = {ii: slice(start, stop)
for ii, (start, stop) in enumerate(zip(bd[:-1], bd[1:]))}
return rmags, cosmags, bins, n_coils, mag_mask, slice_map
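# Worked illustration (hypothetical sizes) of the bookkeeping above: with
# per-coil integration-point counts n_int = [2, 3], the flattened arrays are
# indexed coil-wise via ``bins`` and sliced via ``slice_map``.
_n_int_example = np.array([2, 3])
_bins_example = np.repeat(np.arange(len(_n_int_example)), _n_int_example)
# -> array([0, 0, 1, 1, 1])
_bd_example = np.concatenate(([0], np.cumsum(_n_int_example)))
# -> array([0, 2, 5]), i.e. slice_map == {0: slice(0, 2), 1: slice(2, 5)}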
def _trans_starts_stops_quats(pos, start, stop, this_pos_data):
"""Get all trans and limits we need."""
pos_idx = np.arange(*np.searchsorted(pos[1], [start, stop]))
used = np.zeros(stop - start, bool)
trans = list()
rel_starts = list()
rel_stops = list()
quats = list()
weights = list()
for ti in range(-1, len(pos_idx)):
# first iteration for this block of data
if ti < 0:
rel_start = 0
rel_stop = pos[1][pos_idx[0]] if len(pos_idx) > 0 else stop
rel_stop = rel_stop - start
if rel_start == rel_stop:
continue # our first pos occurs on first time sample
# Don't calculate S_decomp here, use the last one
trans.append(None) # meaning: use previous
quats.append(this_pos_data)
else:
rel_start = pos[1][pos_idx[ti]] - start
if ti == len(pos_idx) - 1:
rel_stop = stop - start
else:
rel_stop = pos[1][pos_idx[ti + 1]] - start
trans.append(pos[0][pos_idx[ti]])
quats.append(pos[2][pos_idx[ti]])
assert 0 <= rel_start
assert rel_start < rel_stop
assert rel_stop <= stop - start
assert not used[rel_start:rel_stop].any()
used[rel_start:rel_stop] = True
rel_starts.append(rel_start)
rel_stops.append(rel_stop)
weights.append(rel_stop - rel_start)
assert used.all()
# Use weighted average for average trans over the window
if this_pos_data is None:
avg_trans = None
else:
weights = np.array(weights)
quats = np.array(quats)
weights = weights / weights.sum().astype(float) # int -> float
avg_quat = _average_quats(quats[:, :3], weights)
avg_t = np.dot(weights, quats[:, 3:6])
avg_trans = np.vstack([
np.hstack([quat_to_rot(avg_quat), avg_t[:, np.newaxis]]),
[[0., 0., 0., 1.]]])
return trans, rel_starts, rel_stops, quats, avg_trans
def _do_tSSS(clean_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid):
"""Compute and apply SSP-like projection vectors based on min corr."""
if not tsss_valid:
t_proj = np.empty((clean_data.shape[1], 0))
else:
np.asarray_chkfinite(resid)
t_proj = _overlap_projector(orig_in_data, resid, st_correlation)
# Apply projector according to Eq. 12 in :footcite:`TauluSimola2006`
msg = (' Projecting %2d intersecting tSSS component%s '
'for %s' % (t_proj.shape[1], _pl(t_proj.shape[1], ' '), t_str))
if n_positions > 1:
msg += ' (across %2d position%s)' % (n_positions,
_pl(n_positions, ' '))
logger.info(msg)
clean_data -= np.dot(np.dot(clean_data, t_proj), t_proj.T)
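# Minimal numpy sketch of the projection step above (random data; ``t_proj`` is
# assumed to have orthonormal columns of shape (n_times, n_proj)): components
# of each channel's time course lying along the projectors are removed.
_rng = np.random.RandomState(0)
_demo_data = _rng.randn(5, 100)                    # (n_channels, n_times)
_demo_proj, _ = np.linalg.qr(_rng.randn(100, 2))   # orthonormal columns
_demo_clean = _demo_data - np.dot(np.dot(_demo_data, _demo_proj), _demo_proj.T)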
def _copy_preload_add_channels(raw, add_channels, copy, info):
"""Load data for processing and (maybe) add cHPI pos channels."""
if copy:
raw = raw.copy()
raw.info['chs'] = info['chs'] # updated coil types
if add_channels:
kinds = [FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3,
FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6,
FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]
out_shape = (len(raw.ch_names) + len(kinds), len(raw.times))
out_data = np.zeros(out_shape, np.float64)
msg = ' Appending head position result channels and '
if raw.preload:
logger.info(msg + 'copying original raw data')
out_data[:len(raw.ch_names)] = raw._data
raw._data = out_data
else:
logger.info(msg + 'loading raw data from disk')
with use_log_level(False):
raw._preload_data(out_data[:len(raw.ch_names)])
raw._data = out_data
assert raw.preload is True
off = len(raw.ch_names)
chpi_chs = [
dict(ch_name='CHPI%03d' % (ii + 1), logno=ii + 1,
scanno=off + ii + 1, unit_mul=-1, range=1., unit=-1,
kind=kinds[ii], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1e-4, coil_type=FWD.COIL_UNKNOWN, loc=np.zeros(12))
for ii in range(len(kinds))]
raw.info['chs'].extend(chpi_chs)
raw.info._update_redundant()
raw.info._check_consistency()
assert raw._data.shape == (raw.info['nchan'], len(raw.times))
# Return the pos picks
pos_picks = np.arange(len(raw.ch_names) - len(chpi_chs),
len(raw.ch_names))
return raw, pos_picks
else:
if copy:
if not raw.preload:
logger.info(' Loading raw data from disk')
raw.load_data(verbose=False)
else:
logger.info(' Using loaded raw data')
return raw, np.array([], int)
def _check_pos(pos, head_frame, raw, st_fixed, sfreq):
"""Check for a valid pos array and transform it to a more usable form."""
_validate_type(pos, (np.ndarray, None), 'head_pos')
if pos is None:
return [None, np.array([-1])]
if not head_frame:
raise ValueError('positions can only be used if coord_frame="head"')
if not st_fixed:
warn('st_fixed=False is untested, use with caution!')
if not isinstance(pos, np.ndarray):
raise TypeError('pos must be an ndarray')
if pos.ndim != 2 or pos.shape[1] != 10:
raise ValueError('pos must be an array of shape (N, 10)')
t = pos[:, 0]
if not np.array_equal(t, np.unique(t)):
raise ValueError('Time points must be unique and in ascending order')
# We need an extra 1e-3 (1 ms) here because MaxFilter outputs values
# only out to 3 decimal places
if not _time_mask(t, tmin=raw._first_time - 1e-3, tmax=None,
sfreq=sfreq).all():
raise ValueError('Head position time points must be greater than '
'first sample offset, but found %0.4f < %0.4f'
% (t[0], raw._first_time))
max_dist = np.sqrt(np.sum(pos[:, 4:7] ** 2, axis=1)).max()
if max_dist > 1.:
warn('Found a distance greater than 1 m (%0.3g m) from the device '
'origin, positions may be invalid and Maxwell filtering could '
'fail' % (max_dist,))
dev_head_ts = np.zeros((len(t), 4, 4))
dev_head_ts[:, 3, 3] = 1.
dev_head_ts[:, :3, 3] = pos[:, 4:7]
dev_head_ts[:, :3, :3] = quat_to_rot(pos[:, 1:4])
pos = [dev_head_ts, t - raw._first_time, pos[:, 1:]]
return pos
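# Hedged sketch of the expected ``head_pos`` layout (hypothetical values):
# columns are [t, q1, q2, q3, x, y, z, gof, err, v], as produced by
# mne.chpi.compute_head_pos / read_head_pos; rows must have unique, ascending t.
_head_pos_example = np.zeros((2, 10))
_head_pos_example[:, 0] = [0., 1.]                 # time (s)
_head_pos_example[:, 4:7] = [[0., 0., 0.040],
                             [0., 0., 0.045]]      # device-to-head translation (m)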
def _get_decomp(trans, all_coils, cal, regularize, exp, ignore_ref,
coil_scale, grad_picks, mag_picks, good_mask, mag_or_fine,
bad_condition, t, mag_scale):
"""Get a decomposition matrix and pseudoinverse matrices."""
from scipy import linalg
#
# Fine calibration processing (point-like magnetometers and calib. coeffs)
#
S_decomp_full = _get_s_decomp(
exp, all_coils, trans, coil_scale, cal, ignore_ref, grad_picks,
mag_picks, mag_scale)
S_decomp = S_decomp_full[good_mask]
#
# Extended SSS basis (eSSS)
#
extended_proj = exp.get('extended_proj', ())
if len(extended_proj) > 0:
rcond = 1e-4
thresh = 1e-4
extended_proj = extended_proj.T * coil_scale[good_mask]
extended_proj /= np.linalg.norm(extended_proj, axis=0)
n_int = _get_n_moments(exp['int_order'])
if S_decomp.shape[1] > n_int:
S_ext = S_decomp[:, n_int:].copy()
S_ext /= np.linalg.norm(S_ext, axis=0)
S_ext_orth = linalg.orth(S_ext, rcond=rcond)
assert S_ext_orth.shape[1] == S_ext.shape[1]
extended_proj -= np.dot(S_ext_orth,
np.dot(S_ext_orth.T, extended_proj))
scale = np.mean(np.linalg.norm(S_decomp[n_int:], axis=0))
else:
scale = np.mean(np.linalg.norm(S_decomp[:n_int], axis=0))
mask = np.linalg.norm(extended_proj, axis=0) > thresh
extended_remove = list(np.where(~mask)[0] + S_decomp.shape[1])
logger.debug(' Reducing %d -> %d'
% (extended_proj.shape[1], mask.sum()))
extended_proj /= np.linalg.norm(extended_proj, axis=0) / scale
S_decomp = np.concatenate([S_decomp, extended_proj], axis=-1)
if extended_proj.shape[1]:
S_decomp_full = np.pad(
S_decomp_full, ((0, 0), (0, extended_proj.shape[1])),
'constant')
S_decomp_full[good_mask, -extended_proj.shape[1]:] = extended_proj
else:
extended_remove = list()
del extended_proj
#
# Regularization
#
S_decomp, reg_moments, n_use_in = _regularize(
regularize, exp, S_decomp, mag_or_fine, extended_remove, t=t)
S_decomp_full = S_decomp_full.take(reg_moments, axis=1)
#
# Pseudo-inverse of total multipolar moment basis set (Part of Eq. 37)
#
pS_decomp, sing = _col_norm_pinv(S_decomp.copy())
cond = sing[0] / sing[-1]
if bad_condition != 'ignore' and cond >= 1000.:
msg = 'Matrix is badly conditioned: %0.0f >= 1000' % cond
if bad_condition == 'error':
raise RuntimeError(msg)
elif bad_condition == 'warning':
warn(msg)
else: # condition == 'info'
logger.info(msg)
# Build in our data scaling here
pS_decomp *= coil_scale[good_mask].T
S_decomp /= coil_scale[good_mask]
S_decomp_full /= coil_scale
return S_decomp, S_decomp_full, pS_decomp, reg_moments, n_use_in
def _get_s_decomp(exp, all_coils, trans, coil_scale, cal, ignore_ref,
grad_picks, mag_picks, mag_scale):
"""Get S_decomp."""
S_decomp = _trans_sss_basis(exp, all_coils, trans, coil_scale)
if cal is not None:
# Compute point-like mags to incorporate gradiometer imbalance
grad_cals = _sss_basis_point(exp, trans, cal, ignore_ref, mag_scale)
# Add point like magnetometer data to bases.
if len(grad_picks) > 0:
S_decomp[grad_picks, :] += grad_cals
# Scale magnetometers by calibration coefficient
if len(mag_picks) > 0:
S_decomp[mag_picks, :] /= cal['mag_cals']
# We need to be careful about KIT gradiometers
return S_decomp
@verbose
def _regularize(regularize, exp, S_decomp, mag_or_fine, extended_remove, t,
verbose=None):
"""Regularize a decomposition matrix."""
# ALWAYS regularize the out components according to norm, since
# gradiometer-only setups (e.g., KIT) can have zero first-order
# (homogeneous field) components
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in = _get_n_moments(int_order)
n_out = S_decomp.shape[1] - n_in
t_str = '%8.3f' % t
if regularize is not None: # regularize='in'
in_removes, out_removes = _regularize_in(
int_order, ext_order, S_decomp, mag_or_fine, extended_remove)
else:
in_removes = []
out_removes = _regularize_out(int_order, ext_order, mag_or_fine,
extended_remove)
reg_in_moments = np.setdiff1d(np.arange(n_in), in_removes)
reg_out_moments = np.setdiff1d(np.arange(n_in, S_decomp.shape[1]),
out_removes)
n_use_in = len(reg_in_moments)
n_use_out = len(reg_out_moments)
reg_moments = np.concatenate((reg_in_moments, reg_out_moments))
S_decomp = S_decomp.take(reg_moments, axis=1)
if regularize is not None or n_use_out != n_out:
logger.info(' Using %s/%s harmonic components for %s '
'(%s/%s in, %s/%s out)'
% (n_use_in + n_use_out, n_in + n_out, t_str,
n_use_in, n_in, n_use_out, n_out))
return S_decomp, reg_moments, n_use_in
@verbose
def _get_mf_picks_fix_mags(info, int_order, ext_order, ignore_ref=False,
verbose=None):
"""Pick types for Maxwell filtering and fix magnetometers."""
# Check for T1/T2 mag types
mag_inds_T1T2 = _get_T1T2_mag_inds(info, use_cal=True)
if len(mag_inds_T1T2) > 0:
fix_mag_coil_types(info, use_cal=True)
# Get indices of channels to use in multipolar moment calculation
ref = not ignore_ref
meg_picks = pick_types(info, meg=True, ref_meg=ref, exclude=[])
meg_info = pick_info(_simplify_info(info), meg_picks)
del info
good_mask = np.zeros(len(meg_picks), bool)
good_mask[pick_types(meg_info, meg=True, ref_meg=ref, exclude='bads')] = 1
n_bases = _get_n_moments([int_order, ext_order]).sum()
if n_bases > good_mask.sum():
raise ValueError('Number of requested bases (%s) exceeds number of '
'good sensors (%s)' % (str(n_bases), good_mask.sum()))
recons = [ch for ch in meg_info['bads']]
if len(recons) > 0:
msg = ' Bad MEG channels being reconstructed: %s' % recons
else:
msg = ' No bad MEG channels'
logger.info(msg)
ref_meg = False if ignore_ref else 'mag'
mag_picks = pick_types(meg_info, meg='mag', ref_meg=ref_meg, exclude=[])
ref_meg = False if ignore_ref else 'grad'
grad_picks = pick_types(meg_info, meg='grad', ref_meg=ref_meg, exclude=[])
assert len(mag_picks) + len(grad_picks) == len(meg_info['ch_names'])
# Determine which are magnetometers for external basis purposes
mag_or_fine = np.zeros(len(meg_picks), bool)
mag_or_fine[mag_picks] = True
# KIT gradiometers are marked as having units T, not T/M (argh)
# We need a separate variable for this because KIT grads should be
# treated mostly like magnetometers (e.g., scaled by 100) for reg
coil_types = np.array([ch['coil_type'] for ch in meg_info['chs']])
mag_or_fine[(coil_types & 0xFFFF) == FIFF.FIFFV_COIL_KIT_GRAD] = False
# The same thing goes for CTF gradiometers...
ctf_grads = [FIFF.FIFFV_COIL_CTF_GRAD,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD]
mag_or_fine[np.in1d(coil_types, ctf_grads)] = False
msg = (' Processing %s gradiometers and %s magnetometers'
% (len(grad_picks), len(mag_picks)))
n_kit = len(mag_picks) - mag_or_fine.sum()
if n_kit > 0:
msg += ' (of which %s are actually KIT gradiometers)' % n_kit
logger.info(msg)
return meg_picks, mag_picks, grad_picks, good_mask, mag_or_fine
def _check_regularize(regularize):
"""Ensure regularize is valid."""
if not (regularize is None or (isinstance(regularize, str) and
regularize in ('in',))):
raise ValueError('regularize must be None or "in"')
def _check_usable(inst):
"""Ensure our data are clean."""
if inst.proj:
raise RuntimeError('Projectors cannot be applied to data during '
'Maxwell filtering.')
current_comp = inst.compensation_grade
if current_comp not in (0, None):
raise RuntimeError('Maxwell filter cannot be done on compensated '
'channels, but data have been compensated with '
'grade %s.' % current_comp)
def _col_norm_pinv(x):
"""Compute the pinv with column-normalization to stabilize calculation.
Note: will modify/overwrite x.
"""
norm = np.sqrt(np.sum(x * x, axis=0))
x /= norm
u, s, v = _safe_svd(x, full_matrices=False, **check_disable)
v /= norm
return np.dot(v.T * 1. / s, u.T), s
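# Quick sanity sketch (random matrix): the column-normalized pseudoinverse above
# should agree with numpy's ``pinv`` up to numerical precision.
_pinv_rng = np.random.RandomState(0)
_pinv_x = _pinv_rng.randn(8, 5)
_pinv_check, _ = _col_norm_pinv(_pinv_x.copy())
_pinv_agrees = np.allclose(_pinv_check, np.linalg.pinv(_pinv_x))  # expected: True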
def _sq(x):
"""Square quickly."""
return x * x
def _check_finite(data):
"""Ensure data is finite."""
if not np.isfinite(data).all():
raise RuntimeError('data contains non-finite numbers')
def _sph_harm_norm(order, degree):
"""Compute normalization factor for spherical harmonics."""
# we could use scipy.special.poch(degree + order + 1, -2 * order)
# here, but it's slower for our fairly small degree
norm = np.sqrt((2 * degree + 1.) / (4 * np.pi))
if order != 0:
norm *= np.sqrt(factorial(degree - order) /
float(factorial(degree + order)))
return norm
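# Spot check of the normalization above against the ``poch`` form mentioned in
# the comment (illustrative degree/order values only):
from scipy.special import poch
_deg_demo, _ord_demo = 4, 2
_norm_factorial = np.sqrt(factorial(_deg_demo - _ord_demo) /
                          float(factorial(_deg_demo + _ord_demo)))
_norm_poch = np.sqrt(poch(_deg_demo + _ord_demo + 1, -2 * _ord_demo))
# both equal sqrt((l - m)! / (l + m)!); the full norm also carries
# sqrt((2l + 1) / (4 * pi))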
def _concatenate_sph_coils(coils):
"""Concatenate MEG coil parameters for spherical harmoncs."""
rs = np.concatenate([coil['r0_exey'] for coil in coils])
wcoils = np.concatenate([coil['w'] for coil in coils])
ezs = np.concatenate([np.tile(coil['ez'][np.newaxis, :],
(len(coil['rmag']), 1))
for coil in coils])
bins = np.repeat(np.arange(len(coils)),
[len(coil['rmag']) for coil in coils])
return rs, wcoils, ezs, bins
_mu_0 = 4e-7 * np.pi # magnetic permeability
def _get_mag_mask(coils):
"""Get the coil_scale for Maxwell filtering."""
return np.array([coil['coil_class'] == FWD.COILC_MAG for coil in coils])
def _sss_basis_basic(exp, coils, mag_scale=100., method='standard'):
"""Compute SSS basis using non-optimized (but more readable) algorithms."""
from scipy.special import sph_harm
int_order, ext_order = exp['int_order'], exp['ext_order']
origin = exp['origin']
assert 'extended_proj' not in exp # advanced option not supported
# Compute vector between origin and coil, convert to spherical coords
if method == 'standard':
# Get position, normal, weights, and number of integration pts.
rmags, cosmags, ws, bins = _concatenate_coils(coils)
rmags -= origin
# Convert points to spherical coordinates
rad, az, pol = _cart_to_sph(rmags).T
cosmags *= ws[:, np.newaxis]
del rmags, ws
out_type = np.float64
else: # testing equivalence method
rs, wcoils, ezs, bins = _concatenate_sph_coils(coils)
rs -= origin
rad, az, pol = _cart_to_sph(rs).T
ezs *= wcoils[:, np.newaxis]
del rs, wcoils
out_type = np.complex128
del origin
# Set up output matrices
n_in, n_out = _get_n_moments([int_order, ext_order])
S_tot = np.empty((len(coils), n_in + n_out), out_type)
S_in = S_tot[:, :n_in]
S_out = S_tot[:, n_in:]
coil_scale = np.ones((len(coils), 1))
coil_scale[_get_mag_mask(coils)] = mag_scale
# Compute internal/external basis vectors (exclude degree 0; L/RHS Eq. 5)
for degree in range(1, max(int_order, ext_order) + 1):
# Only loop over positive orders, negative orders are handled
# for efficiency within
for order in range(degree + 1):
S_in_out = list()
grads_in_out = list()
# Same spherical harmonic is used for both internal and external
sph = sph_harm(order, degree, az, pol)
sph_norm = _sph_harm_norm(order, degree)
# Compute complex gradient for all integration points
# in spherical coordinates (Eq. 6). The gradient for rad, az, pol
# is obtained by taking the partial derivative of Eq. 4 w.r.t. each
# coordinate.
az_factor = 1j * order * sph / np.sin(np.maximum(pol, 1e-16))
pol_factor = (-sph_norm * np.sin(pol) * np.exp(1j * order * az) *
_alegendre_deriv(order, degree, np.cos(pol)))
if degree <= int_order:
S_in_out.append(S_in)
in_norm = _mu_0 * rad ** -(degree + 2)
g_rad = in_norm * (-(degree + 1.) * sph)
g_az = in_norm * az_factor
g_pol = in_norm * pol_factor
grads_in_out.append(_sph_to_cart_partials(az, pol,
g_rad, g_az, g_pol))
if degree <= ext_order:
S_in_out.append(S_out)
out_norm = _mu_0 * rad ** (degree - 1)
g_rad = out_norm * degree * sph
g_az = out_norm * az_factor
g_pol = out_norm * pol_factor
grads_in_out.append(_sph_to_cart_partials(az, pol,
g_rad, g_az, g_pol))
for spc, grads in zip(S_in_out, grads_in_out):
# We could convert to real at the end, but it's more efficient
# to do it now
if method == 'standard':
grads_pos_neg = [_sh_complex_to_real(grads, order)]
orders_pos_neg = [order]
# Deal with the negative orders
if order > 0:
# it's faster to use the conjugation property for
# our normalized spherical harmonics than recalculate
grads_pos_neg.append(_sh_complex_to_real(
_sh_negate(grads, order), -order))
orders_pos_neg.append(-order)
for gr, oo in zip(grads_pos_neg, orders_pos_neg):
# Gradients dotted w/integration point weighted normals
gr = einsum('ij,ij->i', gr, cosmags)
vals = np.bincount(bins, gr, len(coils))
spc[:, _deg_ord_idx(degree, oo)] = -vals
else:
grads = einsum('ij,ij->i', grads, ezs)
v = (np.bincount(bins, grads.real, len(coils)) +
1j * np.bincount(bins, grads.imag, len(coils)))
spc[:, _deg_ord_idx(degree, order)] = -v
if order > 0:
spc[:, _deg_ord_idx(degree, -order)] = \
-_sh_negate(v, order)
# Scale magnetometers
S_tot *= coil_scale
if method != 'standard':
# Eventually we could probably refactor this for 2x mem (and maybe CPU)
# savings by changing how spc/S_tot is assigned above (real only)
S_tot = _bases_complex_to_real(S_tot, int_order, ext_order)
return S_tot
def _sss_basis(exp, all_coils):
"""Compute SSS basis for given conditions.
Parameters
----------
exp : dict
Must contain the following keys:
origin : ndarray, shape (3,)
Origin of the multipolar moment space in meters
int_order : int
Order of the internal multipolar moment space
ext_order : int
Order of the external multipolar moment space
coils : list
List of MEG coils. Each should contain coil information dict specifying
position, normals, weights, number of integration points and channel
type. All coil geometry must be in the same coordinate frame
as ``origin`` (``head`` or ``meg``).
Returns
-------
bases : ndarray, shape (n_coils, n_mult_moments)
Internal and external basis sets as a single ndarray.
Notes
-----
Does not incorporate magnetometer scaling factor or normalize spaces.
Adapted from code provided by Jukka Nenonen.
"""
rmags, cosmags, bins, n_coils = all_coils[:4]
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in, n_out = _get_n_moments([int_order, ext_order])
rmags = rmags - exp['origin']
# do the heavy lifting
max_order = max(int_order, ext_order)
L = _tabular_legendre(rmags, max_order)
phi = np.arctan2(rmags[:, 1], rmags[:, 0])
r_n = np.sqrt(np.sum(rmags * rmags, axis=1))
r_xy = np.sqrt(rmags[:, 0] * rmags[:, 0] + rmags[:, 1] * rmags[:, 1])
cos_pol = rmags[:, 2] / r_n # cos(theta); theta 0...pi
sin_pol = np.sqrt(1. - cos_pol * cos_pol) # sin(theta)
z_only = (r_xy <= 1e-16)
sin_pol_nz = sin_pol.copy()
sin_pol_nz[z_only] = 1. # will be overwritten later
r_xy[z_only] = 1.
cos_az = rmags[:, 0] / r_xy # cos(phi)
cos_az[z_only] = 1.
sin_az = rmags[:, 1] / r_xy # sin(phi)
sin_az[z_only] = 0.
# Appropriate vector spherical harmonics terms
# JNE 2012-02-08: modified alm -> 2*alm, blm -> -2*blm
r_nn2 = r_n.copy()
r_nn1 = 1.0 / (r_n * r_n)
S_tot = np.empty((n_coils, n_in + n_out), np.float64)
S_in = S_tot[:, :n_in]
S_out = S_tot[:, n_in:]
for degree in range(max_order + 1):
if degree <= ext_order:
r_nn1 *= r_n # r^(l-1)
if degree <= int_order:
r_nn2 *= r_n # r^(l+2)
# mu_0*sqrt((2l+1)/4pi (l-m)!/(l+m)!)
mult = 2e-7 * np.sqrt((2 * degree + 1) * np.pi)
if degree > 0:
idx = _deg_ord_idx(degree, 0)
# alpha
if degree <= int_order:
b_r = mult * (degree + 1) * L[degree][0] / r_nn2
b_pol = -mult * L[degree][1] / r_nn2
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = -mult * degree * L[degree][0] * r_nn1
b_pol = -mult * L[degree][1] * r_nn1
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
cosmags, bins, n_coils)
for order in range(1, degree + 1):
ord_phi = order * phi
sin_order = np.sin(ord_phi)
cos_order = np.cos(ord_phi)
mult /= np.sqrt((degree - order + 1) * (degree + order))
factor = mult * np.sqrt(2) # equivalence fix (MF uses 2.)
# Real
idx = _deg_ord_idx(degree, order)
r_fact = factor * L[degree][order] * cos_order
az_fact = factor * order * sin_order * L[degree][order]
pol_fact = -factor * (L[degree][order + 1] -
(degree + order) * (degree - order + 1) *
L[degree][order - 1]) * cos_order
# alpha
if degree <= int_order:
b_r = (degree + 1) * r_fact / r_nn2
b_az = az_fact / (sin_pol_nz * r_nn2)
b_az[z_only] = 0.
b_pol = pol_fact / (2 * r_nn2)
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = -degree * r_fact * r_nn1
b_az = az_fact * r_nn1 / sin_pol_nz
b_az[z_only] = 0.
b_pol = pol_fact * r_nn1 / 2.
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# Imaginary
idx = _deg_ord_idx(degree, -order)
r_fact = factor * L[degree][order] * sin_order
az_fact = factor * order * cos_order * L[degree][order]
pol_fact = factor * (L[degree][order + 1] -
(degree + order) * (degree - order + 1) *
L[degree][order - 1]) * sin_order
# alpha
if degree <= int_order:
b_r = -(degree + 1) * r_fact / r_nn2
b_az = az_fact / (sin_pol_nz * r_nn2)
b_az[z_only] = 0.
b_pol = pol_fact / (2 * r_nn2)
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = degree * r_fact * r_nn1
b_az = az_fact * r_nn1 / sin_pol_nz
b_az[z_only] = 0.
b_pol = pol_fact * r_nn1 / 2.
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
return S_tot
def _integrate_points(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils):
"""Integrate points in spherical coords."""
grads = _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol).T
grads = (grads * cosmags).sum(axis=1)
return bincount(bins, grads, n_coils)
def _tabular_legendre(r, nind):
"""Compute associated Legendre polynomials."""
r_n = np.sqrt(np.sum(r * r, axis=1))
x = r[:, 2] / r_n # cos(theta)
L = list()
for degree in range(nind + 1):
L.append(np.zeros((degree + 2, len(r))))
L[0][0] = 1.
pnn = np.ones(x.shape)
fact = 1.
sx2 = np.sqrt((1. - x) * (1. + x))
for degree in range(nind + 1):
L[degree][degree] = pnn
pnn *= (-fact * sx2)
fact += 2.
if degree < nind:
L[degree + 1][degree] = x * (2 * degree + 1) * L[degree][degree]
if degree >= 2:
for order in range(degree - 1):
L[degree][order] = (x * (2 * degree - 1) *
L[degree - 1][order] -
(degree + order - 1) *
L[degree - 2][order]) / (degree - order)
return L
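# Hedged spot check (assuming the table follows the scipy ``lpmv`` convention,
# i.e. L[degree][order] == lpmv(order, degree, cos(theta)) for order <= degree).
from scipy.special import lpmv
_leg_r = np.array([[0.3, 0.4, 0.5]])
_leg_table = _tabular_legendre(_leg_r, 3)
_leg_x = _leg_r[:, 2] / np.sqrt(np.sum(_leg_r * _leg_r, axis=1))
_leg_agrees = np.allclose(_leg_table[3][2], lpmv(2, 3, _leg_x))  # expected: True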
def _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol):
"""Convert spherical coords to cartesian."""
out = np.empty((3,) + sin_pol.shape)
out[0] = sin_pol * cos_az * b_r + cos_pol * cos_az * b_pol - sin_az * b_az
out[1] = sin_pol * sin_az * b_r + cos_pol * sin_az * b_pol + cos_az * b_az
out[2] = cos_pol * b_r - sin_pol * b_pol
return out
def _get_degrees_orders(order):
"""Get the set of degrees used in our basis functions."""
degrees = np.zeros(_get_n_moments(order), int)
orders = np.zeros_like(degrees)
for degree in range(1, order + 1):
# Only loop over positive orders, negative orders are handled
# for efficiency within
for order in range(degree + 1):
ii = _deg_ord_idx(degree, order)
degrees[ii] = degree
orders[ii] = order
ii = _deg_ord_idx(degree, -order)
degrees[ii] = degree
orders[ii] = -order
return degrees, orders
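# Worked example: for order=2 the helper enumerates all (degree, order) pairs
# with 1 <= degree <= 2 and -degree <= order <= degree, flattened by
# _deg_ord_idx; the expected output (hedged, assuming that indexing) is below.
_degrees_demo, _orders_demo = _get_degrees_orders(2)
# expected: _degrees_demo -> [1, 1, 1, 2, 2, 2, 2, 2]
#           _orders_demo  -> [-1, 0, 1, -2, -1, 0, 1, 2]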
def _alegendre_deriv(order, degree, val):
"""Compute the derivative of the associated Legendre polynomial at a value.
Parameters
----------
order : int
Order of spherical harmonic. (Usually) corresponds to 'm'.
degree : int
Degree of spherical harmonic. (Usually) corresponds to 'l'.
val : float
Value to evaluate the derivative at.
Returns
-------
dPlm : float
Associated Legendre function derivative
"""
from scipy.special import lpmv
assert order >= 0
return (order * val * lpmv(order, degree, val) + (degree + order) *
(degree - order + 1.) * np.sqrt(1. - val * val) *
lpmv(order - 1, degree, val)) / (1. - val * val)
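# Finite-difference spot check of the closed form above (illustrative values;
# agreement is expected but not asserted here).
from scipy.special import lpmv as _lpmv_check
_ad_val, _ad_eps = 0.3, 1e-6
_ad_numeric = (_lpmv_check(2, 4, _ad_val + _ad_eps) -
               _lpmv_check(2, 4, _ad_val - _ad_eps)) / (2 * _ad_eps)
_ad_analytic = _alegendre_deriv(2, 4, _ad_val)
_ad_agrees = np.allclose(_ad_numeric, _ad_analytic, rtol=1e-5)  # expected: True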
def _bases_complex_to_real(complex_tot, int_order, ext_order):
"""Convert complex spherical harmonics to real."""
n_in, n_out = _get_n_moments([int_order, ext_order])
complex_in = complex_tot[:, :n_in]
complex_out = complex_tot[:, n_in:]
real_tot = np.empty(complex_tot.shape, np.float64)
real_in = real_tot[:, :n_in]
real_out = real_tot[:, n_in:]
for comp, real, exp_order in zip([complex_in, complex_out],
[real_in, real_out],
[int_order, ext_order]):
for deg in range(1, exp_order + 1):
for order in range(deg + 1):
idx_pos = _deg_ord_idx(deg, order)
idx_neg = _deg_ord_idx(deg, -order)
real[:, idx_pos] = _sh_complex_to_real(comp[:, idx_pos], order)
if order != 0:
# This extra mult factor baffles me a bit, but it works
# in round-trip testing, so we'll keep it :(
mult = (-1 if order % 2 == 0 else 1)
real[:, idx_neg] = mult * _sh_complex_to_real(
comp[:, idx_neg], -order)
return real_tot
def _bases_real_to_complex(real_tot, int_order, ext_order):
"""Convert real spherical harmonics to complex."""
n_in, n_out = _get_n_moments([int_order, ext_order])
real_in = real_tot[:, :n_in]
real_out = real_tot[:, n_in:]
comp_tot = np.empty(real_tot.shape, np.complex128)
comp_in = comp_tot[:, :n_in]
comp_out = comp_tot[:, n_in:]
for real, comp, exp_order in zip([real_in, real_out],
[comp_in, comp_out],
[int_order, ext_order]):
for deg in range(1, exp_order + 1):
# only loop over positive orders, figure out neg from pos
for order in range(deg + 1):
idx_pos = _deg_ord_idx(deg, order)
idx_neg = _deg_ord_idx(deg, -order)
this_comp = _sh_real_to_complex([real[:, idx_pos],
real[:, idx_neg]], order)
comp[:, idx_pos] = this_comp
comp[:, idx_neg] = _sh_negate(this_comp, order)
return comp_tot
def _check_info(info, sss=True, tsss=True, calibration=True, ctc=True):
"""Ensure that Maxwell filtering has not been applied yet."""
for ent in info['proc_history']:
for msg, key, doing in (('SSS', 'sss_info', sss),
('tSSS', 'max_st', tsss),
('fine calibration', 'sss_cal', calibration),
('cross-talk cancellation', 'sss_ctc', ctc)):
if not doing:
continue
if len(ent['max_info'][key]) > 0:
raise RuntimeError('Maxwell filtering %s step has already '
'been applied, cannot reapply' % msg)
def _update_sss_info(raw, origin, int_order, ext_order, nchan, coord_frame,
sss_ctc, sss_cal, max_st, reg_moments, st_only,
recon_trans, extended_proj):
"""Update info inplace after Maxwell filtering.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered
origin : array-like, shape (3,)
Origin of internal and external multipolar moment space in head coords
(in meters)
int_order : int
Order of internal component of spherical expansion
ext_order : int
Order of external component of spherical expansion
nchan : int
Number of sensors
sss_ctc : dict
The cross talk information.
sss_cal : dict
The calibration information.
max_st : dict
The tSSS information.
reg_moments : ndarray | slice
The moments that were used.
st_only : bool
Whether tSSS only was performed.
recon_trans : instance of Transformation
The reconstruction trans.
extended_proj : ndarray
Extended external bases.
"""
n_in, n_out = _get_n_moments([int_order, ext_order])
raw.info['maxshield'] = False
components = np.zeros(n_in + n_out + len(extended_proj)).astype('int32')
components[reg_moments] = 1
sss_info_dict = dict(in_order=int_order, out_order=ext_order,
nchan=nchan, origin=origin.astype('float32'),
job=FIFF.FIFFV_SSS_JOB_FILTER,
nfree=np.sum(components[:n_in]),
frame=_str_to_frame[coord_frame],
components=components)
max_info_dict = dict(max_st=max_st)
if st_only:
max_info_dict.update(sss_info=dict(), sss_cal=dict(), sss_ctc=dict())
else:
max_info_dict.update(sss_info=sss_info_dict, sss_cal=sss_cal,
sss_ctc=sss_ctc)
# Reset 'bads' for any MEG channels since they've been reconstructed
_reset_meg_bads(raw.info)
# set the reconstruction transform
raw.info['dev_head_t'] = recon_trans
block_id = _generate_meas_id()
raw.info['proc_history'].insert(0, dict(
max_info=max_info_dict, block_id=block_id, date=DATE_NONE,
creator='mne-python v%s' % __version__, experimenter=''))
def _reset_meg_bads(info):
"""Reset MEG bads."""
meg_picks = pick_types(info, meg=True, exclude=[])
info['bads'] = [bad for bad in info['bads']
if info['ch_names'].index(bad) not in meg_picks]
check_disable = dict(check_finite=False)
def _orth_overwrite(A):
"""Create a slightly more efficient 'orth'."""
# adapted from scipy/linalg/decomp_svd.py
u, s = _safe_svd(A, full_matrices=False, **check_disable)[:2]
M, N = A.shape
eps = np.finfo(float).eps
tol = max(M, N) * np.amax(s) * eps
num = np.sum(s > tol, dtype=int)
return u[:, :num]
def _overlap_projector(data_int, data_res, corr):
"""Calculate projector for removal of subspace intersection in tSSS."""
# corr necessary to deal with noise when finding identical signal
# directions in the subspace. See the end of the Results section in
# :footcite:`TauluSimola2006`
# Note that the procedure here is an updated version of
# :footcite:`TauluSimola2006` (and used in MF's tSSS) that uses residuals
# instead of internal/external spaces directly. This provides more degrees
# of freedom when analyzing for intersections between internal and
# external spaces.
# Normalize data, then compute orth to get temporal bases. Matrices
# must have shape (n_samps x effective_rank) when passed into svd
# computation
# we use np.linalg.norm instead of sp.linalg.norm here: ~2x faster!
from scipy import linalg
n = np.linalg.norm(data_int)
n = 1. if n == 0 else n # all-zero data should gracefully continue
data_int = _orth_overwrite((data_int / n).T)
n = np.linalg.norm(data_res)
n = 1. if n == 0 else n
data_res = _orth_overwrite((data_res / n).T)
if data_int.shape[1] == 0 or data_res.shape[1] == 0:
return np.empty((data_int.shape[0], 0))
Q_int = linalg.qr(data_int,
overwrite_a=True, mode='economic', **check_disable)[0].T
Q_res = linalg.qr(data_res,
overwrite_a=True, mode='economic', **check_disable)[0]
C_mat = np.dot(Q_int, Q_res)
del Q_int
# Compute angles between subspace and which bases to keep
S_intersect, Vh_intersect = _safe_svd(C_mat, full_matrices=False,
**check_disable)[1:]
del C_mat
intersect_mask = (S_intersect >= corr)
del S_intersect
# Compute projection operator as (I-LL_T) Eq. 12 in
# :footcite:`TauluSimola2006` V_principal should be shape
# (n_time_pts x n_retained_inds)
Vh_intersect = Vh_intersect[intersect_mask].T
V_principal = np.dot(Q_res, Vh_intersect)
return V_principal
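# Usage sketch (variable names are illustrative, not taken from this module):
# the returned V_principal spans the temporal subspace shared by the internal
# and residual signals, so tSSS-style cleaning removes it with an
# (I - V V^T) projection along the time axis:
#
#     # t_proj = _overlap_projector(in_data, resid, st_correlation)
#     # clean -= np.dot(np.dot(clean, t_proj), t_proj.T)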
def _prep_fine_cal(info, fine_cal):
from ._fine_cal import read_fine_calibration
_validate_type(fine_cal, (dict, 'path-like'))
if not isinstance(fine_cal, dict):
extra = op.basename(str(fine_cal))
fine_cal = read_fine_calibration(fine_cal)
else:
extra = 'dict'
logger.info(f' Using fine calibration {extra}')
ch_names = _clean_names(info['ch_names'], remove_whitespace=True)
info_to_cal = OrderedDict()
missing = list()
for ci, name in enumerate(fine_cal['ch_names']):
if name not in ch_names:
missing.append(name)
else:
oi = ch_names.index(name)
info_to_cal[oi] = ci
meg_picks = pick_types(info, meg=True, exclude=[])
if len(info_to_cal) != len(meg_picks):
raise RuntimeError(
'Not all MEG channels found in fine calibration file, missing:\n%s'
% sorted(list({ch_names[pick] for pick in meg_picks} -
set(fine_cal['ch_names']))))
if len(missing):
warn('Found cal channel%s not in data: %s' % (_pl(missing), missing))
return info_to_cal, fine_cal, ch_names
def _update_sensor_geometry(info, fine_cal, ignore_ref):
"""Replace sensor geometry information and reorder cal_chs."""
info_to_cal, fine_cal, ch_names = _prep_fine_cal(info, fine_cal)
grad_picks = pick_types(info, meg='grad', exclude=())
mag_picks = pick_types(info, meg='mag', exclude=())
# Determine gradiometer imbalances and magnetometer calibrations
grad_imbalances = np.array([fine_cal['imb_cals'][info_to_cal[gi]]
for gi in grad_picks]).T
if grad_imbalances.shape[0] not in [0, 1, 3]:
raise ValueError('Must have 1 (x) or 3 (x, y, z) point-like ' +
'magnetometers. Currently have %i' %
grad_imbalances.shape[0])
mag_cals = np.array([fine_cal['imb_cals'][info_to_cal[mi]]
for mi in mag_picks])
# Now let's actually construct our point-like adjustment coils for grads
grad_coilsets = _get_grad_point_coilsets(
info, n_types=len(grad_imbalances), ignore_ref=ignore_ref)
calibration = dict(grad_imbalances=grad_imbalances,
grad_coilsets=grad_coilsets, mag_cals=mag_cals)
# Replace sensor locations (and track differences) for fine calibration
ang_shift = list()
used = np.zeros(len(info['chs']), bool)
cal_corrs = list()
cal_chans = list()
adjust_logged = False
for oi, ci in info_to_cal.items():
assert not used[oi]
used[oi] = True
info_ch = info['chs'][oi]
ch_num = int(fine_cal['ch_names'][ci].lstrip('MEG').lstrip('0'))
cal_chans.append([ch_num, info_ch['coil_type']])
# Some .dat files might only rotate EZ, so we must check first that
# EX and EY are orthogonal to EZ. If not, we find the rotation between
# the original and fine-cal ez, and rotate EX and EY accordingly:
ch_coil_rot = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
cal_loc = fine_cal['locs'][ci].copy()
cal_coil_rot = _loc_to_coil_trans(cal_loc)[:3, :3]
if np.max([np.abs(np.dot(cal_coil_rot[:, ii], cal_coil_rot[:, 2]))
for ii in range(2)]) > 1e-6: # X or Y not orthogonal
if not adjust_logged:
logger.info(' Adjusting non-orthogonal EX and EY')
adjust_logged = True
# find the rotation matrix that goes from one to the other
this_trans = _find_vector_rotation(
ch_coil_rot[:, 2], cal_coil_rot[:, 2])
cal_loc[3:] = np.dot(this_trans, ch_coil_rot).T.ravel()
# calculate shift angle
v1 = _loc_to_coil_trans(cal_loc)[:3, :3]
_normalize_vectors(v1)
v2 = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
_normalize_vectors(v2)
ang_shift.append(np.sum(v1 * v2, axis=0))
if oi in grad_picks:
extra = [1., fine_cal['imb_cals'][ci][0]]
else:
extra = [fine_cal['imb_cals'][ci][0], 0.]
cal_corrs.append(np.concatenate([extra, cal_loc]))
# Adjust channel normal orientations with those from fine calibration
# Channel positions are not changed
info_ch['loc'][3:] = cal_loc[3:]
assert (info_ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)
meg_picks = pick_types(info, meg=True, exclude=())
assert used[meg_picks].all()
assert not used[np.setdiff1d(np.arange(len(used)), meg_picks)].any()
# This gets written to the Info struct
sss_cal = dict(cal_corrs=np.array(cal_corrs),
cal_chans=np.array(cal_chans))
# Log quantification of sensor changes
# Deal with numerical precision giving absolute vals slightly more than 1.
ang_shift = np.array(ang_shift)
np.clip(ang_shift, -1., 1., ang_shift)
np.rad2deg(np.arccos(ang_shift), ang_shift) # Convert to degrees
logger.info(' Adjusted coil positions by (μ ± σ): '
'%0.1f° ± %0.1f° (max: %0.1f°)' %
(np.mean(ang_shift), np.std(ang_shift),
np.max(np.abs(ang_shift))))
return calibration, sss_cal
def _get_grad_point_coilsets(info, n_types, ignore_ref):
"""Get point-type coilsets for gradiometers."""
_rotations = dict(
x=np.array([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1.]]),
y=np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1.]]),
z=np.eye(4))
grad_coilsets = list()
grad_picks = pick_types(info, meg='grad', exclude=[])
if len(grad_picks) == 0:
return grad_coilsets
grad_info = pick_info(_simplify_info(info), grad_picks)
# Coil_type values for x, y, z point magnetometers
# Note: 1D correction files only have x-direction corrections
for ch in grad_info['chs']:
ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER
orig_locs = [ch['loc'].copy() for ch in grad_info['chs']]
for rot in 'xyz'[:n_types]:
# Rotate the Z magnetometer orientation to the destination orientation
for ci, ch in enumerate(grad_info['chs']):
ch['loc'][3:] = _coil_trans_to_loc(np.dot(
_loc_to_coil_trans(orig_locs[ci]),
_rotations[rot]))[3:]
grad_coilsets.append(_prep_mf_coils(grad_info, ignore_ref))
return grad_coilsets
def _sss_basis_point(exp, trans, cal, ignore_ref=False, mag_scale=100.):
"""Compute multipolar moments for point-like mags (in fine cal)."""
# Loop over all coordinate directions desired and create point mags
S_tot = 0.
# These are magnetometers, so use a uniform coil_scale (mag_scale, 100. by default)
this_cs = np.array([mag_scale], float)
for imb, coils in zip(cal['grad_imbalances'], cal['grad_coilsets']):
S_add = _trans_sss_basis(exp, coils, trans, this_cs)
# Scale spaces by gradiometer imbalance
S_add *= imb[:, np.newaxis]
S_tot += S_add
# Return point-like mag bases
return S_tot
def _regularize_out(int_order, ext_order, mag_or_fine, extended_remove):
"""Regularize out components based on norm."""
n_in = _get_n_moments(int_order)
remove_homog = ext_order > 0 and not mag_or_fine.any()
return list(range(n_in, n_in + 3 * remove_homog)) + extended_remove
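# Worked example (sketch; assumes _get_n_moments(8) == 80, i.e. L * (L + 2)
# internal moments): with ext_order > 0 and no magnetometer/fine-cal
# channels, the three homogeneous external components are dropped:
#
#     # _regularize_out(8, 3, np.zeros(10, bool), extended_remove=[])
#     # -> [80, 81, 82]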
def _regularize_in(int_order, ext_order, S_decomp, mag_or_fine,
extended_remove):
"""Regularize basis set using idealized SNR measure."""
n_in, n_out = _get_n_moments([int_order, ext_order])
# The "signal" terms depend only on the inner expansion order
# (i.e., not sensor geometry or head position / expansion origin)
a_lm_sq, rho_i = _compute_sphere_activation_in(
np.arange(int_order + 1))
degrees, orders = _get_degrees_orders(int_order)
a_lm_sq = a_lm_sq[degrees]
I_tots = np.zeros(n_in) # we might not traverse all, so use np.zeros
in_keepers = list(range(n_in))
out_removes = _regularize_out(int_order, ext_order, mag_or_fine,
extended_remove)
out_keepers = list(np.setdiff1d(np.arange(n_in, S_decomp.shape[1]),
out_removes))
remove_order = []
S_decomp = S_decomp.copy()
use_norm = np.sqrt(np.sum(S_decomp * S_decomp, axis=0))
S_decomp /= use_norm
eigs = np.zeros((n_in, 2))
# plot = False # for debugging
# if plot:
# import matplotlib.pyplot as plt
# fig, axs = plt.subplots(3, figsize=[6, 12])
# plot_ord = np.empty(n_in, int)
# plot_ord.fill(-1)
# count = 0
# # Reorder plot to match MF
# for degree in range(1, int_order + 1):
# for order in range(0, degree + 1):
# assert plot_ord[count] == -1
# plot_ord[count] = _deg_ord_idx(degree, order)
# count += 1
# if order > 0:
# assert plot_ord[count] == -1
# plot_ord[count] = _deg_ord_idx(degree, -order)
# count += 1
# assert count == n_in
# assert (plot_ord >= 0).all()
# assert len(np.unique(plot_ord)) == n_in
noise_lev = 5e-13 # noise level in T/m
noise_lev *= noise_lev # effectively what would happen by earlier multiply
for ii in range(n_in):
this_S = S_decomp.take(in_keepers + out_keepers, axis=1)
u, s, v = _safe_svd(this_S, full_matrices=False, **check_disable)
del this_S
eigs[ii] = s[[0, -1]]
v = v.T[:len(in_keepers)]
v /= use_norm[in_keepers][:, np.newaxis]
eta_lm_sq = np.dot(v * 1. / s, u.T)
del u, s, v
eta_lm_sq *= eta_lm_sq
eta_lm_sq = eta_lm_sq.sum(axis=1)
eta_lm_sq *= noise_lev
# Mysterious scale factors to match MF, likely due to differences
# in the basis normalizations...
eta_lm_sq[orders[in_keepers] == 0] *= 2
eta_lm_sq *= 0.0025
snr = a_lm_sq[in_keepers] / eta_lm_sq
I_tots[ii] = 0.5 * np.log2(snr + 1.).sum()
remove_order.append(in_keepers[np.argmin(snr)])
in_keepers.pop(in_keepers.index(remove_order[-1]))
# heuristic to quit if we're past the peak to save cycles
if ii > 10 and (I_tots[ii - 1:ii + 1] < 0.95 * I_tots.max()).all():
break
# if plot and ii == 0:
# axs[0].semilogy(snr[plot_ord[in_keepers]], color='k')
# if plot:
# axs[0].set(ylabel='SNR', ylim=[0.1, 500], xlabel='Component')
# axs[1].plot(I_tots)
# axs[1].set(ylabel='Information', xlabel='Iteration')
# axs[2].plot(eigs[:, 0] / eigs[:, 1])
# axs[2].set(ylabel='Condition', xlabel='Iteration')
# Pick the components that give at least 98% of max info
# This is done because the curves can be quite flat, and we err on the
# side of including rather than excluding components
max_info = np.max(I_tots)
lim_idx = np.where(I_tots >= 0.98 * max_info)[0][0]
in_removes = remove_order[:lim_idx]
for ii, ri in enumerate(in_removes):
logger.debug(' Condition %0.3f/%0.3f = %03.1f, '
'Removing in component %s: l=%s, m=%+0.0f'
% (tuple(eigs[ii]) + (eigs[ii, 0] / eigs[ii, 1],
ri, degrees[ri], orders[ri])))
logger.debug(' Resulting information: %0.1f bits/sample '
'(%0.1f%% of peak %0.1f)'
% (I_tots[lim_idx], 100 * I_tots[lim_idx] / max_info,
max_info))
return in_removes, out_removes
def _compute_sphere_activation_in(degrees):
u"""Compute the "in" power from random currents in a sphere.
Parameters
----------
degrees : ndarray
The degrees to evaluate.
Returns
-------
a_power : ndarray
The a_lm power associated with the given degrees (see
:footcite:`KnuutilaEtAl1993`).
rho_i : float
The current density.
References
----------
.. footbibliography::
"""
r_in = 0.080 # radius of the randomly-activated sphere
# set the observation point r=r_s, az=el=0, so we can just look at m=0 term
# compute the resulting current density rho_i
# This is the "surface" version of the equation:
# b_r_in = 100e-15 # fixed 100 fT radial field amplitude at distance r_s
# r_s = 0.13 # 5 cm from the surface
# rho_degrees = np.arange(1, 100)
# in_sum = (rho_degrees * (rho_degrees + 1.) /
# ((2. * rho_degrees + 1.)) *
# (r_in / r_s) ** (2 * rho_degrees + 2)).sum() * 4. * np.pi
# rho_i = b_r_in * 1e7 / np.sqrt(in_sum)
# rho_i = 5.21334885574e-07 # value for r_s = 0.125
rho_i = 5.91107375632e-07 # deterministic from above, so just store it
a_power = _sq(rho_i) * (degrees * r_in ** (2 * degrees + 4) /
(_sq(2. * degrees + 1.) *
(degrees + 1.)))
return a_power, rho_i
def _trans_sss_basis(exp, all_coils, trans=None, coil_scale=100.):
"""Compute SSS basis (optionally) using a dev<->head trans."""
if trans is not None:
if not isinstance(trans, Transform):
trans = Transform('meg', 'head', trans)
assert not np.isnan(trans['trans']).any()
all_coils = (apply_trans(trans, all_coils[0]),
apply_trans(trans, all_coils[1], move=False),
) + all_coils[2:]
if not isinstance(coil_scale, np.ndarray):
# Scale all magnetometers (with `coil_class` == 1.0) by `mag_scale`
cs = coil_scale
coil_scale = np.ones((all_coils[3], 1))
coil_scale[all_coils[4]] = cs
S_tot = _sss_basis(exp, all_coils)
S_tot *= coil_scale
return S_tot
# intentionally omitted: st_duration, st_correlation, destination, st_fixed,
# st_only
@verbose
def find_bad_channels_maxwell(
raw, limit=7., duration=5., min_count=5, return_scores=False,
origin='auto', int_order=8, ext_order=3, calibration=None,
cross_talk=None, coord_frame='head', regularize='in', ignore_ref=False,
bad_condition='error', head_pos=None, mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'), h_freq=40.0,
extended_proj=(), verbose=None):
r"""Find bad channels using Maxwell filtering.
Parameters
----------
raw : instance of Raw
Raw data to process.
limit : float
Detection limit for noisy segments (default is 7.). Smaller values will
find more bad channels at increased risk of including good ones. This
value can be interpreted as the standard score of differences between
the original and Maxwell-filtered data. See the ``Notes`` section for
details.
.. note:: This setting only concerns *noisy* channel detection.
The limit for *flat* channel detection currently cannot be
controlled by the user. Flat channel detection is always run
before noisy channel detection.
duration : float
Duration of the segments into which to slice the data for processing,
in seconds. Default is 5.
min_count : int
Minimum number of times a channel must show up as bad in a chunk.
Default is 5.
return_scores : bool
If ``True``, return a dictionary with scoring information for each
evaluated segment of the data. Default is ``False``.
.. warning:: This feature is experimental and may change in a future
version of MNE-Python without prior notice. Please
report any problems and enhancement proposals to the
developers.
.. versionadded:: 0.21
%(maxwell_origin)s
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_cal)s
%(maxwell_cross)s
%(maxwell_coord)s
%(maxwell_reg)s
%(maxwell_ref)s
%(maxwell_cond)s
%(maxwell_pos)s
%(maxwell_mag)s
%(maxwell_skip)s
h_freq : float | None
The cutoff frequency (in Hz) of the low-pass filter that will be
applied before processing the data. This defaults to ``40.``, which
should provide similar results to MaxFilter. If you do not wish to
apply a filter, set this to ``None``.
%(maxwell_extended)s
%(verbose)s
Returns
-------
noisy_chs : list
List of bad MEG channels that were automatically detected as being
noisy among the good MEG channels.
flat_chs : list
List of MEG channels that were detected as being flat in at least
``min_count`` segments.
scores : dict
A dictionary with information produced by the scoring algorithms.
Only returned when ``return_scores`` is ``True``. It contains the
following keys:
- ``ch_names`` : ndarray, shape (n_meg,)
The names of the MEG channels. Their order corresponds to the
order of rows in the ``scores`` and ``limits`` arrays.
- ``ch_types`` : ndarray, shape (n_meg,)
The types of the MEG channels in ``ch_names`` (``'mag'``,
``'grad'``).
- ``bins`` : ndarray, shape (n_windows, 2)
The inclusive window boundaries (start and stop; in seconds) used
to calculate the scores.
- ``scores_flat`` : ndarray, shape (n_meg, n_windows)
The scores for testing whether MEG channels are flat. These values
correspond to the standard deviation of a segment.
See the ``Notes`` section for details.
- ``limits_flat`` : ndarray, shape (n_meg, 1)
The score thresholds (in standard deviation) below which a segment
was classified as "flat".
- ``scores_noisy`` : ndarray, shape (n_meg, n_windows)
The scores for testing whether MEG channels are noisy. These values
correspond to the standard score of a segment.
See the ``Notes`` section for details.
- ``limits_noisy`` : ndarray, shape (n_meg, 1)
The score thresholds (in standard scores) above which a segment was
classified as "noisy".
.. note:: The scores and limits for channels marked as ``bad`` in the
input data will be set to ``np.nan``.
See Also
--------
annotate_flat
maxwell_filter
Notes
-----
All arguments after ``raw``, ``limit``, ``duration``, ``min_count``, and
``return_scores`` are the same as :func:`~maxwell_filter`, except that the
following are not allowed in this function because they are unused:
``st_duration``, ``st_correlation``, ``destination``, ``st_fixed``, and
``st_only``.
This algorithm, for a given chunk of data:
1. Runs SSS on the data, without removing external components.
2. Excludes channels as *flat* that have had low variability
(standard deviation < 0.01 fT or fT/cm in a 30 ms window) in the given
or any previous chunk.
3. For each channel :math:`k`, computes the *range* or peak-to-peak
:math:`d_k` of the difference between the reconstructed and original
data.
4. Computes the average :math:`\mu_d` and standard deviation
:math:`\sigma_d` of the differences (after scaling magnetometer data
to roughly match the scale of the gradiometer data using ``mag_scale``).
5. Marks channels as bad for the chunk when
:math:`d_k > \mu_d + \textrm{limit} \times \sigma_d`. Note that this
expression can be easily transformed into
:math:`(d_k - \mu_d) / \sigma_d > \textrm{limit}`, which is equivalent
to :math:`z(d_k) > \textrm{limit}`, with :math:`z(d_k)` being the
standard or z-score of the difference.
Data are processed in chunks of the given ``duration``, and channels that
are bad for at least ``min_count`` chunks are returned.
Channels marked as *flat* in step 2 are excluded from all subsequent steps
of noisy channel detection.
This algorithm gives results similar to, but not identical with,
MaxFilter. Differences arise because MaxFilter processes on a
buffer-by-buffer basis (using buffer-size-dependent downsampling logic),
uses different filtering characteristics, and possibly other factors.
Channels that are near the ``limit`` for a given ``min_count`` are
particularly susceptible to being different between the two
implementations.
.. versionadded:: 0.20
"""
if h_freq is not None:
if raw.info.get('lowpass') and raw.info['lowpass'] < h_freq:
msg = (f'The input data has already been low-pass filtered with a '
f'{raw.info["lowpass"]} Hz cutoff frequency, which is '
f'below the requested cutoff of {h_freq} Hz. Not applying '
f'low-pass filter.')
logger.info(msg)
else:
logger.info(f'Applying low-pass filter with {h_freq} Hz cutoff '
f'frequency ...')
raw = raw.copy().load_data().filter(l_freq=None, h_freq=h_freq)
limit = float(limit)
onsets, ends = _annotations_starts_stops(
raw, skip_by_annotation, invert=True)
del skip_by_annotation
# operate on chunks
starts = list()
stops = list()
step = int(round(raw.info['sfreq'] * duration))
for onset, end in zip(onsets, ends):
if end - onset >= step:
ss = np.arange(onset, end - step + 1, step)
starts.extend(ss)
ss = ss + step
ss[-1] = end
stops.extend(ss)
min_count = min(_ensure_int(min_count, 'min_count'), len(starts))
logger.info('Scanning for bad channels in %d interval%s (%0.1f sec) ...'
% (len(starts), _pl(starts), step / raw.info['sfreq']))
params = _prep_maxwell_filter(
raw, skip_by_annotation=[], # already accounted for
origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, cross_talk=cross_talk,
coord_frame=coord_frame, regularize=regularize,
ignore_ref=ignore_ref, bad_condition=bad_condition, head_pos=head_pos,
mag_scale=mag_scale, extended_proj=extended_proj)
del origin, int_order, ext_order, calibration, cross_talk, coord_frame
del regularize, ignore_ref, bad_condition, head_pos, mag_scale
good_meg_picks = params['meg_picks'][params['good_mask']]
assert len(params['meg_picks']) == len(params['coil_scale'])
assert len(params['good_mask']) == len(params['meg_picks'])
noisy_chs = Counter()
flat_chs = Counter()
flat_limits = dict(grad=0.01e-13, mag=0.01e-15)
these_limits = np.array([
flat_limits['grad']
if pick in params['grad_picks'] else
flat_limits['mag']
for pick in good_meg_picks])
flat_step = max(20, int(30 * raw.info['sfreq'] / 1000.))
all_flats = set()
# Prepare variables to return if `return_scores=True`.
bins = np.empty((len(starts), 2)) # To store start, stop of each segment
# We create ndarrays with one row per channel, regardless of channel type
# and whether the channel has been marked as "bad" in info or not. This
# makes indexing in the loop easier. We only filter this down to the subset
# of MEG channels after all processing is done.
ch_names = np.array(raw.ch_names)
ch_types = np.array(raw.get_channel_types())
scores_flat = np.full((len(ch_names), len(starts)), np.nan)
scores_noisy = np.full_like(scores_flat, fill_value=np.nan)
thresh_flat = np.full((len(ch_names), 1), np.nan)
thresh_noisy = np.full_like(thresh_flat, fill_value=np.nan)
for si, (start, stop) in enumerate(zip(starts, stops)):
n_iter = 0
orig_data = raw.get_data(None, start, stop, verbose=False)
chunk_raw = RawArray(
orig_data, params['info'],
first_samp=raw.first_samp + start, copy='data', verbose=False)
t = chunk_raw.times[[0, -1]] + start / raw.info['sfreq']
logger.info(' Interval %3d: %8.3f - %8.3f'
% ((si + 1,) + tuple(t[[0, -1]])))
# Flat pass: SD < 0.01 fT/cm or 0.01 fT for at least 30 ms (or 20 samples)
n = stop - start
flat_stop = n - (n % flat_step)
data = chunk_raw.get_data(good_meg_picks, 0, flat_stop)
data.shape = (data.shape[0], -1, flat_step)
delta = np.std(data, axis=-1).min(-1) # min std across segments
# We may want to return this later if `return_scores=True`.
bins[si, :] = t[0], t[-1]
scores_flat[good_meg_picks, si] = delta
thresh_flat[good_meg_picks] = these_limits.reshape(-1, 1)
chunk_flats = delta < these_limits
chunk_flats = np.where(chunk_flats)[0]
chunk_flats = [raw.ch_names[good_meg_picks[chunk_flat]]
for chunk_flat in chunk_flats]
flat_chs.update(chunk_flats)
all_flats |= set(chunk_flats)
chunk_flats = sorted(all_flats)
these_picks = [pick for pick in good_meg_picks
if raw.ch_names[pick] not in chunk_flats]
# Bad pass
chunk_noisy = list()
params['st_duration'] = int(round(
chunk_raw.times[-1] * raw.info['sfreq']))
for n_iter in range(1, 101): # iteratively exclude the worst ones
assert set(raw.info['bads']) & set(chunk_noisy) == set()
params['good_mask'][:] = [
chunk_raw.ch_names[pick] not in
raw.info['bads'] + chunk_noisy + chunk_flats
for pick in params['meg_picks']]
chunk_raw._data[:] = orig_data
delta = chunk_raw.get_data(these_picks)
with use_log_level(False):
_run_maxwell_filter(
chunk_raw, reconstruct='orig', copy=False, **params)
if n_iter == 1 and len(chunk_flats):
logger.info(' Flat (%2d): %s'
% (len(chunk_flats), ' '.join(chunk_flats)))
delta -= chunk_raw.get_data(these_picks)
# p2p
range_ = np.ptp(delta, axis=-1)
cs_picks = np.searchsorted(params['meg_picks'], these_picks)
range_ *= params['coil_scale'][cs_picks, 0]
mean, std = np.mean(range_), np.std(range_)
# z score
z = (range_ - mean) / std
idx = np.argmax(z)
max_ = z[idx]
# We may want to return this later if `return_scores=True`.
scores_noisy[these_picks, si] = z
thresh_noisy[these_picks] = limit
if max_ < limit:
break
name = raw.ch_names[these_picks[idx]]
logger.debug(' Bad: %s %0.1f'
% (name, max_))
these_picks.pop(idx)
chunk_noisy.append(name)
noisy_chs.update(chunk_noisy)
noisy_chs = sorted((b for b, c in noisy_chs.items() if c >= min_count),
key=lambda x: raw.ch_names.index(x))
flat_chs = sorted((f for f, c in flat_chs.items() if c >= min_count),
key=lambda x: raw.ch_names.index(x))
# Only include MEG channels.
ch_names = ch_names[params['meg_picks']]
ch_types = ch_types[params['meg_picks']]
scores_flat = scores_flat[params['meg_picks']]
thresh_flat = thresh_flat[params['meg_picks']]
scores_noisy = scores_noisy[params['meg_picks']]
thresh_noisy = thresh_noisy[params['meg_picks']]
logger.info(' Static bad channels: %s' % (noisy_chs,))
logger.info(' Static flat channels: %s' % (flat_chs,))
logger.info('[done]')
if return_scores:
scores = dict(ch_names=ch_names,
ch_types=ch_types,
bins=bins,
scores_flat=scores_flat,
limits_flat=thresh_flat,
scores_noisy=scores_noisy,
limits_noisy=thresh_noisy)
return noisy_chs, flat_chs, scores
else:
return noisy_chs, flat_chs
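# Usage sketch for find_bad_channels_maxwell above (illustrative only; raw,
# fine_cal_file and ct_file are assumed to exist):
#
#     # noisy, flat, scores = find_bad_channels_maxwell(
#     #     raw, return_scores=True, calibration=fine_cal_file,
#     #     cross_talk=ct_file)
#     # raw.info['bads'] += noisy + flat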
def _read_cross_talk(cross_talk, ch_names):
sss_ctc = dict()
ctc = None
if cross_talk is not None:
sss_ctc = _read_ctc(cross_talk)
ctc_chs = sss_ctc['proj_items_chs']
# checking for extra space ambiguity in channel names
# between old and new fif files
if ch_names[0] not in ctc_chs:
ctc_chs = _clean_names(ctc_chs, remove_whitespace=True)
ch_names = _clean_names(ch_names, remove_whitespace=True)
missing = sorted(list(set(ch_names) - set(ctc_chs)))
if len(missing) != 0:
raise RuntimeError('Missing MEG channels in cross-talk matrix:\n%s'
% missing)
missing = sorted(list(set(ctc_chs) - set(ch_names)))
if len(missing) > 0:
warn('Not all cross-talk channels in raw:\n%s' % missing)
ctc_picks = [ctc_chs.index(name) for name in ch_names]
ctc = sss_ctc['decoupler'][ctc_picks][:, ctc_picks]
# I have no idea why, but MF transposes this for storage..
sss_ctc['decoupler'] = sss_ctc['decoupler'].T.tocsc()
return ctc, sss_ctc
@verbose
def compute_maxwell_basis(info, origin='auto', int_order=8, ext_order=3,
calibration=None, coord_frame='head',
regularize='in', ignore_ref=True,
bad_condition='error', mag_scale=100.,
extended_proj=(), verbose=None):
r"""Compute the SSS basis for a given measurement info structure.
Parameters
----------
info : instance of Info
The measurement info.
%(maxwell_origin)s
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_cal)s
%(maxwell_coord)s
%(maxwell_reg)s
%(maxwell_ref)s
%(maxwell_cond)s
%(maxwell_mag)s
%(maxwell_extended)s
%(verbose)s
Returns
-------
S : ndarray, shape (n_meg, n_moments)
The basis that can be used to reconstruct the data.
pS : ndarray, shape (n_moments, n_good_meg)
The (stabilized) pseudoinverse of the S array.
reg_moments : ndarray, shape (n_moments,)
The moments that were kept after regularization.
n_use_in : int
The number of kept moments that were in the internal space.
Notes
-----
This outputs variants of :math:`\mathbf{S}` and :math:`\mathbf{S^\dagger}`
from equations 27 and 37 of :footcite:`TauluKajola2005` with the coil scale
for magnetometers already factored in so that the resulting denoising
transform of the data to obtain :math:`\hat{\phi}_{in}` from equation
38 would be::
phi_in = S[:, :n_use_in] @ pS[:n_use_in] @ data_meg_good
.. versionadded:: 0.23
References
----------
.. footbibliography::
"""
from ..io import RawArray
_validate_type(info, Info, 'info')
raw = RawArray(np.zeros((len(info['ch_names']), 1)), info.copy(),
verbose=False)
logger.info('Computing Maxwell basis')
params = _prep_maxwell_filter(
raw=raw, origin=origin, int_order=int_order, ext_order=ext_order,
calibration=calibration, coord_frame=coord_frame, destination=None,
regularize=regularize, ignore_ref=ignore_ref,
bad_condition=bad_condition, mag_scale=mag_scale,
extended_proj=extended_proj)
_, S_decomp_full, pS_decomp, reg_moments, n_use_in = \
params['_get_this_decomp_trans'](info['dev_head_t'], t=0.)
return S_decomp_full, pS_decomp, reg_moments, n_use_in
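# Usage sketch following the Notes above (info and data_meg_good are assumed
# to exist, with rows of data_meg_good matching the good MEG channels):
#
#     # S, pS, reg_moments, n_use_in = compute_maxwell_basis(info)
#     # phi_in = S[:, :n_use_in] @ pS[:n_use_in] @ data_meg_good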
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/sparse/frame.py | 3 | 35083 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
import warnings
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import maybe_upcast, find_common_type
from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse
from pandas.core.common import _try_sort
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
import pandas.core.generic as generic
from pandas.core.sparse.series import SparseSeries, SparseArray
from pandas._libs.sparse import BlockIndex, get_blocks
from pandas.util._decorators import Appender
import pandas.core.ops as ops
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
index : array-like, optional
columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries
(default: nan). Will not override SparseSeries passed in.
"""
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
fill_value=default_fill_value)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = _ensure_index(columns)
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
else:
columns = Index(_try_sort(list(data.keys())))
if index is None:
index = extract_index(list(data.values()))
sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, np.nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_arr = np.empty(len(index), dtype='float64')
nan_arr.fill(np.nan)
nan_arr = sp_maker(nan_arr)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
""" Init self from ndarray or list of lists """
data = _prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
return self._init_dict(data, index, columns, dtype)
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
""" Init self from scipy.sparse matrix """
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
index = _default_index(N)
if columns is None:
columns = _default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
.format(columns=len(columns), K=K))
if len(index) != N:
raise ValueError('Index length mismatch: {index} vs. {N}'
.format(index=len(index), N=N))
return index, columns
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64
and uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
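# Usage sketch (assumes scipy is installed; sdf is a SparseDataFrame):
#
#     # coo = sdf.to_coo()   # scipy.sparse.coo_matrix
#     # csr = coo.tocsr()    # e.g. convert for fast row slicing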
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = dict((k, v.to_dense()) for k, v in compat.iteritems(self))
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
""" get new SparseDataFrame applying func to each columns """
new_data = {}
for col, series in compat.iteritems(self):
new_data[col] = func(series)
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum([ser.sp_index.npoints
for _, ser in compat.iteritems(self)])
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
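# Worked example: a 3 x 2 frame storing only two non-fill points has
# density 2 / (3 * 2) == 1 / 3.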
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
sp_maker = lambda x, index=None: SparseArray(
x, index=index, fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def __getitem__(self, key):
"""
Retrieve column or slice from DataFrame
"""
if isinstance(key, slice):
date_rng = self.index[key]
return self.reindex(date_rng)
elif isinstance(key, (np.ndarray, list, Series)):
return self._getitem_array(key)
else:
return self._get_item_cache(key)
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series._get_value(index, takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
dense = self.to_dense()._set_value(
index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
_set_value.__doc__ = set_value.__doc__
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None,
try_cast=True):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError("'level' argument is not supported")
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
new_fill_value = None
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
# if the fill values are the same use them? or use a valid one
other_fill_value = getattr(other, 'default_fill_value', np.nan)
if self.default_fill_value == other_fill_value:
new_fill_value = self.default_fill_value
elif np.isnan(self.default_fill_value) and not np.isnan(
other_fill_value):
new_fill_value = other_fill_value
elif not np.isnan(self.default_fill_value) and np.isnan(
other_fill_value):
new_fill_value = self.default_fill_value
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None, fill_value=None,
try_cast=True):
new_data = {}
if fill_value is not None:
raise NotImplementedError("'fill_value' argument is not supported")
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_index = self.index.union(other.index)
this = self
if self.index is not new_index:
this = self.reindex(new_index)
if other.index is not new_index:
other = other.reindex(new_index)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
# fill_value is a function of our operator
if isna(other.fill_value) or isna(self.default_fill_value):
fill_value = np.nan
else:
fill_value = func(np.float64(self.default_fill_value),
np.float64(other.fill_value))
return self._constructor(
new_data, index=new_index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None, fill_value=None,
try_cast=True):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if fill_value is not None:
raise NotImplementedError("'fill_value' argument is not supported")
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_data = {}
union = intersection = self.columns
if not union.equals(other.index):
union = other.index.union(self.columns)
intersection = other.index.intersection(self.columns)
for col in intersection:
new_data[col] = func(self[col], float(other[col]))
return self._constructor(
new_data, index=self.index, columns=union,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func, errors='raise', try_cast=True):
return self._apply_columns(lambda x: func(x, other))
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return self._constructor(
index=index, columns=self.columns).__finalize__(self)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = _ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
new, fill_value = maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
return self._constructor(
new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, method, copy, level, fill_value=None,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if notna(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
if method is not None:
raise NotImplementedError("'method' argument is not supported")
# TODO: fill value handling
sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns)
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: '
'{to_rename}'.format(to_rename=to_rename))
def lrenamer(x):
if x in to_rename:
return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isna'])
def isna(self):
return self._apply_columns(lambda x: x.isna())
isnull = isna
@Appender(generic._shared_docs['notna'])
def notna(self):
return self._apply_columns(lambda x: x.notna())
notnull = notna
def apply(self, func, axis=0, broadcast=False, reduce=False):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in compat.iteritems(self):
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
else:
if not broadcast:
return self._apply_standard(func, axis, reduce=reduce)
else:
return self._apply_broadcast(func, axis)
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
"""
return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
nobs = sum(lengths)
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in compat.iteritems(frame):
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
labels=[major_labels, minor_labels],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in compat.iteritems(series_dict):
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in compat.iteritems(series_dict):
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output
# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False,
**ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False,
**ops.frame_special_funcs)
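# --- Illustrative usage sketch (added annotation, not part of pandas) ----
# A minimal, hedged example of the `apply`/`applymap` methods defined
# above. It assumes a pandas version that still ships SparseDataFrame;
# the data values are invented for illustration, and the function is not
# called anywhere, so importing the module is unaffected.
def _example_sparse_apply():
    import numpy as np
    import pandas as pd
    sdf = pd.SparseDataFrame({'a': [1.0, np.nan, 4.0],
                              'b': [np.nan, 9.0, np.nan]})
    roots = sdf.apply(np.sqrt)                # exercises the ufunc branch above
    doubled = sdf.applymap(lambda x: 2 * x)   # element-wise transform
    return roots, doubled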
| apache-2.0 |
gfyoung/pandas | pandas/compat/pickle_compat.py | 1 | 7934 | """
Support pre-0.12 series pickle compatibility.
"""
from __future__ import annotations
import contextlib
import copy
import io
import pickle as pkl
from typing import TYPE_CHECKING, Optional
import warnings
from pandas._libs.tslibs import BaseOffset
from pandas import Index
if TYPE_CHECKING:
from pandas import DataFrame, Series
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except TypeError as err:
# If we have a deprecated function,
# try to replace and try again.
msg = "_reconstruct: First argument must be a sub-type of ndarray"
if msg in str(err):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except TypeError:
pass
elif args and issubclass(args[0], BaseOffset):
# TypeError: object.__new__(Day) is not safe, use Day.__new__()
cls = args[0]
stack[-1] = cls.__new__(*args)
return
raise
_sparse_msg = """\
Loading a saved '{cls}' as a {new} with sparse values.
'{cls}' is now removed. You should re-save this dataset in its new format.
"""
class _LoadSparseSeries:
# To load a SparseSeries as a Series[Sparse]
# https://github.com/python/mypy/issues/1020
# error: Incompatible return type for "__new__" (returns "Series", but must return
# a subtype of "_LoadSparseSeries")
def __new__(cls) -> Series: # type: ignore[misc]
from pandas import Series
warnings.warn(
_sparse_msg.format(cls="SparseSeries", new="Series"),
FutureWarning,
stacklevel=6,
)
return Series(dtype=object)
class _LoadSparseFrame:
# To load a SparseDataFrame as a DataFrame[Sparse]
# https://github.com/python/mypy/issues/1020
# error: Incompatible return type for "__new__" (returns "DataFrame", but must
# return a subtype of "_LoadSparseFrame")
def __new__(cls) -> DataFrame: # type: ignore[misc]
from pandas import DataFrame
warnings.warn(
_sparse_msg.format(cls="SparseDataFrame", new="DataFrame"),
FutureWarning,
stacklevel=6,
)
return DataFrame()
# If classes are moved, provide compat here.
_class_locations_map = {
("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
# 15477
("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
# 10890
("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
("pandas.sparse.series", "SparseTimeSeries"): (
"pandas.core.sparse.series",
"SparseSeries",
),
# 12588, extensions moving
("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
# 18543 moving period
("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
# 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
("pandas.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
("pandas._libs.tslib", "__nat_unpickle"): (
"pandas._libs.tslibs.nattype",
"__nat_unpickle",
),
# 15998 top-level dirs moving
("pandas.sparse.array", "SparseArray"): (
"pandas.core.arrays.sparse",
"SparseArray",
),
("pandas.sparse.series", "SparseSeries"): (
"pandas.compat.pickle_compat",
"_LoadSparseSeries",
),
("pandas.sparse.frame", "SparseDataFrame"): (
"pandas.core.sparse.frame",
"_LoadSparseFrame",
),
("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
("pandas.indexes.numeric", "Int64Index"): (
"pandas.core.indexes.numeric",
"Int64Index",
),
("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
("pandas.tseries.index", "_new_DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"_new_DatetimeIndex",
),
("pandas.tseries.index", "DatetimeIndex"): (
"pandas.core.indexes.datetimes",
"DatetimeIndex",
),
("pandas.tseries.period", "PeriodIndex"): (
"pandas.core.indexes.period",
"PeriodIndex",
),
# 19269, arrays moving
("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
# 19939, add timedeltaindex, float64index compat from 15998 move
("pandas.tseries.tdi", "TimedeltaIndex"): (
"pandas.core.indexes.timedeltas",
"TimedeltaIndex",
),
("pandas.indexes.numeric", "Float64Index"): (
"pandas.core.indexes.numeric",
"Float64Index",
),
("pandas.core.sparse.series", "SparseSeries"): (
"pandas.compat.pickle_compat",
"_LoadSparseSeries",
),
("pandas.core.sparse.frame", "SparseDataFrame"): (
"pandas.compat.pickle_compat",
"_LoadSparseFrame",
),
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat and uses a non-public class of the pickle module.
# error: Name 'pkl._Unpickler' is not defined
class Unpickler(pkl._Unpickler): # type: ignore[name-defined]
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super().find_class(module, name)
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except (AttributeError, KeyError):
pass
def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
"""
Load a pickle, with a provided encoding,
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
def loads(
bytes_object: bytes,
*,
fix_imports: bool = True,
encoding: str = "ASCII",
errors: str = "strict",
):
"""
Analogous to pickle._loads.
"""
fd = io.BytesIO(bytes_object)
return Unpickler(
fd, fix_imports=fix_imports, encoding=encoding, errors=errors
).load()
@contextlib.contextmanager
def patch_pickle():
"""
Temporarily patch pickle to use our unpickler.
"""
orig_loads = pkl.loads
try:
setattr(pkl, "loads", loads)
yield
finally:
setattr(pkl, "loads", orig_loads)
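# --- Illustrative usage sketch (added annotation, not part of pandas) ----
# A hedged example of how the helpers above are typically used: `load`
# reads a legacy pickle through the compatibility Unpickler, while
# `patch_pickle` temporarily routes `pickle.loads` through `loads`.
# The file path and byte string below are hypothetical.
def _example_read_legacy_pickle(path="legacy_frame.pkl"):
    with open(path, "rb") as fh:
        return load(fh, encoding=None)
def _example_patched_loads(raw_bytes: bytes):
    with patch_pickle():
        return pkl.loads(raw_bytes)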
| bsd-3-clause |
Sentient07/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
jwass/mplleaflet | examples/quiver.py | 2 | 1885 | import json
import os
import matplotlib.pyplot as plt
import numpy as np
import pyproj
import mplleaflet
# Load up the geojson data
filename = os.path.join(os.path.dirname(__file__), 'data', 'track.geojson')
with open(filename) as f:
gj = json.load(f)
features = [feat for feat in gj['features'][::10]]
xy = np.array([feat['geometry']['coordinates'] for feat in features])
# Transform the data to EPSG:26986 (Mass. state plane)
proj_in = pyproj.Proj(preserve_units=True, init='epsg:4326', no_defs=True)
crs_out = {'init': 'epsg:26986', 'no_defs': True}
proj_out = pyproj.Proj(preserve_units=True, **crs_out)
xy = np.array([pyproj.transform(proj_in, proj_out, c[0], c[1]) for c in xy])
# Grab the speed (m/s)
speed = np.array([feat['properties']['speed'] for feat in features])
# Grab the course. Course is 0 degrees due North, increasing clockwise
course = np.array([feat['properties']['course'] for feat in features])
angle = np.deg2rad(-course + 90) # Convert to angle in xy plane
# Normalize the speed to use as the length of the arrows
r = speed / max(speed)
uv = r[:, np.newaxis] * np.column_stack([np.cos(angle), np.sin(angle)])
# For each point, plot an arrow pointing in the direction of the iPhone's
# course estimate. The arrow length is proportional to the phone's speed
# estimate. For a bigger effect, color each arrow based on its speed
plt.quiver(xy[:,0], xy[:,1], uv[:,0], uv[:,1], speed)
root, ext = os.path.splitext(__file__)
mapfile = root + '.html'
# Create the map
mplleaflet.show(path=mapfile, crs=crs_out, tiles=('https://api.mapbox.com/styles/v1/jwasserman/cir51iqda0010bmnic1s5sb71/tiles/256/{z}/{x}/{y}?access_token=pk.eyJ1Ijoiandhc3Nlcm1hbiIsImEiOiJjaW9kNnRiaXUwNGh0dmFrajlqZ25wZnFsIn0.CU4YynqRJkmG1PwWDMBJSA', '<a href="https://mapbox.com/about/maps">© 2017 Mapbox</a> | <a href=https://www.openstreetmap.org/about">© OpenStreetMap</a>'))
| bsd-3-clause |
cdsi/grima | src/python/grima/plot2.py | 1 | 16620 | from __future__ import division
from __future__ import with_statement
# standard python libraries
try:
import json
except:
import simplejson as json
import re
import os
import time
# numpy is used by SnapCursor.on_motion below
import numpy as np
# matplotlib.sf.net
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from mpl_toolkits.mplot3d import Axes3D
# www.gtk.org
import gtk
import gobject
gobject.threads_init()
# our own libraries
from elrond.static import *
from elrond.ui import Widget, Playable, SaveAs
from elrond.util import APINotImplemented, Object, Property
class SubPlot(Widget):
@Property
def axl():
def fget(self):
return self.__axes['axl']
def fset(self, value):
pass
return locals()
@Property
def axr():
def fget(self):
return self.__axes['axr']
def fset(self, value):
pass
return locals()
def __set_title(self, axes, title):
if title:
axes.set_title(title)
def __set_limits(self, axes, xlimits, ylimits):
axes.axis('auto')
if xlimits[0] or xlimits[1]:
axes.set_xlim(xlimits)
if ylimits[0] or ylimits[1]:
axes.set_ylim(ylimits)
def __set_labels(self, axes, xlabel, ylabel):
if xlabel:
axes.set_xlabel(xlabel)
if ylabel:
axes.set_ylabel(ylabel)
def __plot__(self, axes, xlabel, ylabel):
if not self.overlay:
self.clear()
self.__set_labels(axes, xlabel, ylabel)
axes.grid(True)
def plotl(self, x, y, xlabel=None, ylabel=None, style='-', color=0xFF0000,\
mec='r', mfc='None', mew=1, ms=6, linewidth=1, label=None, picker=None):
axes = self.__axes['axl']
self.__plot__(axes, xlabel, ylabel)
pl = axes.plot(x, y, style, color='#%06X' % (color), mec=mec,
mfc=mfc, mew=mew, ms=ms, linewidth=linewidth, label=label, picker=picker)
def plotlh(self, y, xlabel=None, ylabel=None, style='--', color=0xFF0000):
axes = self.__axes['axl']
self.__plot__(axes, xlabel, ylabel)
axes.axhline(y, ls=style, color='#%06X' % (color))
def plotlv(self, x, xlabel=None, ylabel=None, style='--', color=0xFF0000):
axes = self.__axes['axl']
self.__plot__(axes, xlabel, ylabel)
axes.axvline(x, ls=style, color='#%06X' % (color))
def text(self, x, y, s, fontsize=12, backgroundcolor='w', va='bottom', ha='left', transform=False):
axes=self.__axes['axl']
if transform:
axes.text(x, y, s, fontsize=fontsize, backgroundcolor=backgroundcolor, \
va=va, ha=ha, transform = axes.transAxes)
else:
axes.text(x, y, s, fontsize=fontsize, backgroundcolor=backgroundcolor, \
va=va, ha=ha)
def grid(self, grid):
axes=self.__axes['axl']
axes.grid(grid)
def annotate(self, text, xy, family, va):
axes=self.__axes['axl']
axes.annotate(text, xy, family=family, va=va)
def blankticks(self):
axes = self.__axes['axl']
axes.set_xticklabels([], visible=False)
axes.set_yticklabels([], visible=False)
def addcollection(self, collection):
axes = self.__axes['axl']
axes.add_collection(collection)
def get_limits(self):
axes = self.__axes['axl']
return axes.get_xlim() + axes.get_ylim()
@APINotImplemented
def plotr(self, x, y, xlabel=None, ylabel=None, style='-', color=0xFF0000):
axes = self.__axes['axr']
self.__plot__(axes, xlabel, ylabel)
axes.plot(x, y, style, color='#%06X' % (color))
@APINotImplemented
def plotrh(self, y, xlabel=None, ylabel=None, style='--', color=0xFF0000):
axes = self.__axes['axr']
self.__plot__(axes, xlabel, ylabel)
axes.axhline(y, ls=style, color='#%06X' % (color))
@APINotImplemented
def plotrv(self, x, xlabel=None, ylabel=None, style='--', color=0xFF0000):
axes = self.__axes['axr']
self.__plot__(axes, xlabel, ylabel)
axes.axvline(x, ls=style, color='#%06X' % (color))
def reset(self, nsubplots, i):
axl = self.__axes['axl']
axr = self.__axes['axr']
axl.grid(True)
axl.yaxis.set_label_position('left')
axl.yaxis.tick_left()
# TODO: axr.grid(True)
# TODO: axr.yaxis.set_label_position('right')
# TODO: axr.yaxis.tick_right()
axl.change_geometry(nsubplots, 1, nsubplots - i)
def clear(self):
self.__axes['axl'].clear()
# TODO: self.__axes['axr'].clear()
def draw(self):
axl = self.__axes['axl']
self.__set_limits(axl, self.xlimitsl, self.ylimitsl)
self.__set_labels(axl, self.xlabel, self.ylabel)
self.__set_title(axl, self.title)
# TODO: axr = self.__axes['axr']
# TODO: self.__set_limits(axr, self.xlimitsr, self.ylimitsr)
# TODO: self.__set_labels(axr, self.xlabel, self.ylabel)
gobject.idle_add(self.__canvas.draw)
def axes_new(self, figure, canvas, nsubplots):
axl = figure.add_subplot(nsubplots + 1, 1, nsubplots + 1)
axr = None # TODO: axl.twinx()
self.__axes = {'axl': axl, 'axr': axr}
self.__canvas = canvas
def axes_delete(self, figure):
figure.delaxes(self.__axes['axl'])
def __init__(self):
Widget.__init__(self)
self.title = None
self.xlimitsl = [0, 0]
self.xlimitsr = [0, 0]
self.ylimitsl = [0, 0]
self.ylimitsr = [0, 0]
self.xlabel = ''
self.ylabel = ''
self.overlay = True
class StripChart(SubPlot, Playable):
def __tasklette(self, producer, interval=1, duration=60):
self.xlimitsl[0] = 0
self.xlimitsl[1] = duration
x = [0, 0]
y = [0, 0]
offset = time.time()
for data in producer():
elapsed = time.time() - offset
if elapsed > duration:
self.xlimitsl[0] += elapsed - x[1]
self.xlimitsl[1] += elapsed - x[1]
x[1] = elapsed
for __y in data:
y[1] = __y
gtk.gdk.threads_enter()
self.plotl(x, y)
self.draw()
gtk.gdk.threads_leave()
y[0] = y[1]
x[0] = x[1]
time.sleep(interval)
def __init__(self):
SubPlot.__init__(self)
Playable.__init__(self, self.__tasklette)
class Plot(Widget):
@Property
def overlay():
def fget(self):
return self.prefs['overlay'].enabled
def fset(self, overlay):
for plotable in self.__plotables:
plotable.overlay = overlay
widget = self.builder.get_object('overlay__enabled')
widget.set_active(overlay)
return locals()
@Property
def canvas():
def fget(self):
return self.__canvas
return locals()
@Property
def toolbar():
def fget(self):
return self.__toolbar
return locals()
def __reset(self):
nplotables = len(self.__plotables)
for i, plotable in enumerate(self.__plotables):
plotable.reset(nplotables, i)
self.__figure.subplots_adjust(hspace = 0.5)
def __plotable_new(self, plotable):
plotable.axes_new(self.__figure, self.__canvas, len(self.__plotables))
self.__plotables.append(plotable)
self.__reset()
return plotable
def __plotable_delete(self, plotable):
plotable.axes_delete(self.__figure)
self.__plotables.remove(plotable)
self.__reset()
def subplot_new(self):
return self.__plotable_new(SubPlot())
def subplot_delete(self, plotable):
self.__plotable_delete(plotable)
def stripchart_new(self):
return self.__plotable_new(StripChart())
def stripchart_delete(self, stripchart):
self.__plotable_delete(stripchart)
def plot3d_new(self):
return self.__plotable_new(Plot3D())
def plot3d_delete(self, plot3d):
self.__plotable_delete(plot3d)
def __save(self, filename):
if not filename:
return
self.__filename = filename
with open(self.__filename, 'w') as fd:
fd.write(self.__buffer.get_text(*self.__buffer.get_bounds()))
def clear(self):
for plotable in self.__plotables:
plotable.clear()
self.__reset()
self.draw()
def show(self):
self.widget.show()
self.__canvas.show()
self.__toolbar.show()
def hide(self):
self.__toolbar.hide()
self.__canvas.hide()
self.widget.hide()
def on_open(self, widget):
pass
def on_save(self, widget):
if self.__filename:
self.__save(self.__filename)
else:
self.__chooser.get_selection()
def on_saveas(self, widget):
self.__chooser.get_selection(filename=self.__filename)
def on_clear(self, widget):
self.clear()
def get_toolbar(self):
return self.__toolbar
def __init__(self):
Widget.__init__(self)
path = os.environ['GRIMA_ETC']
name = 'grima-subplot-widget'
self.loadui(path, name)
self.loaddb(path, name)
self.__figure = Figure()
self.__canvas = FigureCanvas(self.__figure)
self.__toolbar = NavigationToolbar(self.__canvas, None)
self.__canvas.show()
self.figure = self.__figure
widget = gtk.VBox()
widget.show()
widget.pack_start(self.__canvas)
widget.pack_start(self.__toolbar, False, False)
container = self.builder.get_object('container')
container.add(widget)
self.__filename = None
self.__plotables = []
self.__chooser = SaveAs()
self.__chooser.deletable = False
self.__chooser.embedded = True
self.__chooser.callback = self.__save
class PlotApp(Widget):
def __init__(self, *args, **kwargs):
Widget.__init__(self, *args, **kwargs)
path = os.environ['GRIMA_ETC']
name = 'grima-subplot-app'
self.loadui(path, name)
self.loaddb(path, name)
class Plot3D(SubPlot):
def axes_new(self, figure, canvas, nsubplots):
axl = Axes3D(figure)
axr = None # TODO: axl.twinx()
self.__axes = {'axl': axl, 'axr': axr}
self.__canvas = canvas
def reset(self, nsubplots, i):
axl = self.__axes['axl']
axr = self.__axes['axr']
axl.grid(True)
axl.yaxis.set_label_position('left')
axl.yaxis.tick_left()
def draw(self):
self.__canvas.draw()
def plotl(self, x, y, z, xlabel=None, ylabel=None, label=None):
axes = self.__axes['axl']
self.__plot__(axes, xlabel, ylabel)
axes.plot(x, y, z, label=label)
try:
axes.legend(numpoints=1)
except:
pass
def __init__(self):
SubPlot.__init__(self)
self.zlimitsl = [0, 0]
self.zlimitsr = [0, 0]
self.zlabel = ''
class Cursor:
def __init__(self, ax, plot):
self.ax = ax
self.canvas = plot.canvas
self.plot = plot
self.lx = self.ax.axhline(color='r', linewidth=2)
self.ly = self.ax.axvline(color='r', linewidth=2)
self.hide()
self.txt = ax.text(0.7, 0.9, '', transform=self.ax.transAxes)
self.txt.set_visible(False)
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.canvas.mpl_connect('draw_event', self.on_draw)
self.canvas.mpl_connect('motion_notify_event', self.on_motion)
self.restricted = False
def show(self):
self.lx.set_visible(True)
self.ly.set_visible(True)
def hide(self):
self.lx.set_visible(False)
self.ly.set_visible(False)
def restrict(self, value):
self.restricted = value
def on_draw(self, event):
self.hide()
self.ax.draw_artist(self.ax)
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def on_motion(self, event):
if self.restricted:
return
if not event.inaxes:
self.hide()
self.canvas.restore_region(self.background)
self.canvas.blit(self.ax.bbox)
return
self.show()
self.canvas.restore_region(self.background)
x, y = event.xdata, event.ydata
self.lx.set_ydata(y)
self.ly.set_xdata(x)
self.txt.set_text('x=%1.2f, y=%1.2f' % (x,y))
self.ax.draw_artist(self.lx)
self.ax.draw_artist(self.ly)
self.ax.draw_artist(self.txt)
self.canvas.blit(self.ax.bbox)
class SnapCursor(Cursor):
def __init__(self, ax, plot, x, y):
Cursor.__init__(self, ax, plot)
self.x = x
self.z = zip(x,y)
self.x.sort()
self.z.sort()
def on_motion(self, event):
        if self.restricted or not event.inaxes:
return
self.canvas.restore_region(self.background)
x, y = event.xdata, event.ydata
i = np.searchsorted(self.x, x)
x, y = self.z[i]
self.lx.set_ydata(y)
self.ly.set_xdata(x)
self.txt.set_text('x=%1.2f, y=%1.2f' % (x,y))
self.ax.draw_artist(self.lx)
self.ax.draw_artist(self.ly)
self.ax.draw_artist(self.txt)
self.canvas.blit(self.ax.bbox)
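# --- Illustrative usage sketch (added annotation, not part of grima) -----
# A hedged example of wiring the classes above together: build a Plot,
# add a SubPlot, draw one line and attach a Cursor to its left axes.
# It assumes a running GTK main loop and the GRIMA_ETC environment
# variable required by Widget.loadui(); the data values are invented.
def _example_plot_usage():
    plot = Plot()
    sub = plot.subplot_new()
    xs = range(10)
    ys = [x * x for x in xs]
    sub.plotl(xs, ys, xlabel='x', ylabel='x squared', color=0x0000FF)
    sub.draw()
    cursor = Cursor(sub.axl, plot)  # crosshair that follows the mouse
    plot.show()
    return plot, cursor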
# $Id:$
#
# Local Variables:
# indent-tabs-mode: nil
# python-continuation-offset: 2
# python-indent: 8
# End:
# vim: ai et si sw=8 ts=8
| mit |
brian-team/brian2cuda | examples/compartmental/bipolar_with_inputs2_cpp.py | 1 | 2038 | '''
A pseudo MSO neuron, with two dendrites (fake geometry).
There are synaptic inputs.
Second method.
'''
import os
import matplotlib
matplotlib.use('Agg')
from brian2 import *
name = os.path.basename(__file__).replace('.py', '')
codefolder = os.path.join('code', name)
print('running example {}'.format(name))
print('compiling model in {}'.format(codefolder))
set_device('cpp_standalone', directory=codefolder,
compile=True, run=True, debug=False)
# Morphology
morpho = Soma(30*um)
morpho.L = Cylinder(diameter=1*um, length=100*um, n=50)
morpho.R = Cylinder(diameter=1*um, length=100*um, n=50)
# Passive channels
gL = 1e-4*siemens/cm**2
EL = -70*mV
Es = 0*mV
taus = 1*ms
eqs='''
Im = gL*(EL-v) : amp/meter**2
Is = gs*(Es-v) : amp (point current)
dgs/dt = -gs/taus : siemens
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs,
Cm=1*uF/cm**2, Ri=100*ohm*cm, method='exponential_euler')
neuron.v = EL
# Regular inputs
stimulation = NeuronGroup(2, 'dx/dt = 300*Hz : 1', threshold='x>1', reset='x=0',
method='euler')
stimulation.x = [0, 0.5] # Asynchronous
# Synapses
w = 20*nS
S = Synapses(stimulation, neuron,on_pre='gs += w')
S.connect(i=0, j=morpho.L[99.9*um])
S.connect(i=1, j=morpho.R[99.9*um])
# Monitors
mon_soma = StateMonitor(neuron, 'v', record=[0])
mon_L = StateMonitor(neuron.L, 'v', record=True)
mon_R = StateMonitor(neuron, 'v', record=morpho.R[99.9*um])
run(50*ms, report='text', profile=True)
print(profiling_summary())
subplot(211)
plot(mon_L.t/ms, mon_soma[0].v/mV, 'k')
plot(mon_L.t/ms, mon_L[morpho.L[99.9*um]].v/mV, 'r')
plot(mon_L.t/ms, mon_R[morpho.R[99.9*um]].v/mV, 'b')
ylabel('v (mV)')
subplot(212)
for i in [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]:
plot(mon_L.t/ms, mon_L.v[i, :]/mV)
xlabel('Time (ms)')
ylabel('v (mV)')
#show()
plotpath = os.path.join('plots', '{}.png'.format(name))
savefig(plotpath)
print('plot saved in {}'.format(plotpath))
print('the generated model in {} needs to be removed manually if wanted'.format(codefolder))
| gpl-2.0 |
pwwang/pyppl | pipen/proc.py | 1 | 9886 | """Provide the Proc class"""
import asyncio
import logging
from pathlib import Path
from typing import Any, ClassVar, Dict, Iterable, List, Optional, Type, Union
from rich import box
from rich.panel import Panel
from slugify import slugify
from varname import varname
from simpleconf import Config
from xqute import Xqute, JobStatus
from xqute import Scheduler
from pandas import DataFrame
from .utils import (
brief_list,
log_rich_renderable,
logger,
get_console_width,
cached_property,
DEFAULT_CONSOLE_WIDTH
)
from .template import Template
from .plugin import plugin
from ._proc_properties import ProcProperties, ProcMeta, ProcType
from .exceptions import ProcWorkdirConflictException
class Proc(ProcProperties, metaclass=ProcMeta):
"""The Proc class provides process assembly functionality"""
name: ClassVar[str] = None
desc: ClassVar[str] = None
SELF: ClassVar["Proc"] = None
def __new__(cls, *args, **kwargs):
"""Make sure cls() always get to the same instance"""
if not args and not kwargs:
if not cls.SELF or cls.SELF.__class__ is not cls:
cls.SELF = super().__new__(cls)
return cls.SELF
return super().__new__(cls)
# pylint: disable=redefined-builtin,redefined-outer-name
def __init__(self,
name: Optional[str] = None,
desc: Optional[str] = None,
*,
end: Optional[bool] = None,
input_keys: Union[List[str], str] = None,
input: Optional[Union[str, Iterable[str]]] = None,
output: Optional[Union[str, Iterable[str]]] = None,
requires: Optional[Union[ProcType, Iterable[ProcType]]] = None,
lang: Optional[str] = None,
script: Optional[str] = None,
forks: Optional[int] = None,
cache: Optional[bool] = None,
args: Optional[Dict[str, Any]] = None,
envs: Optional[Dict[str, Any]] = None,
dirsig: Optional[bool] = None,
profile: Optional[str] = None,
template: Optional[Union[str, Type[Template]]] = None,
scheduler: Optional[Union[str, Scheduler]] = None,
scheduler_opts: Optional[Dict[str, Any]] = None,
plugin_opts: Optional[Dict[str, Any]] = None) -> None:
if getattr(self, '_inited', False):
return
super().__init__(
end,
input_keys,
input,
output,
lang,
script,
forks,
requires,
args,
envs,
cache,
dirsig,
profile,
template,
scheduler,
scheduler_opts,
plugin_opts
)
self.nexts = []
self.name = (
name if name is not None
else self.__class__.name if self.__class__.name is not None
else self.__class__.__name__ if self is self.__class__.SELF
else varname()
)
self.desc = (
desc if desc is not None
else self.__class__.desc
if self.__class__.desc is not None
else self.__doc__.lstrip().splitlines()[0]
if self.__doc__
else 'Undescribed.'
)
self.pipeline = None
self.pbar = None
self.jobs = []
self.xqute = None
self.workdir = None
self.out_channel = None
self._inited = True
def log(self,
level: Union[int, str],
msg: str,
*args,
logger: logging.Logger = logger) -> None:
"""Log message for the process
Args:
level: The log level of the record
msg: The message to log
*args: The arguments to format the message
logger: The logging logger
"""
msg = msg % args
if not isinstance(level, int):
level = logging.getLevelName(level.upper())
logger.log(level, '[cyan]%s:[/cyan] %s', self.name, msg)
def gc(self):
"""GC process for the process to save memory after it's done"""
del self.xqute
self.xqute = None
del self.jobs[:]
self.jobs = []
del self.pbar
self.pbar = None
async def prepare(self, pipeline: "Pipen", profile: str) -> None:
"""Prepare the process
Args:
pipeline: The Pipen object
profile: The profile of the configuration
"""
if self.end is None and not self.nexts:
self.end = True
self.pipeline = pipeline
profile = self.profile or profile
if profile == 'default':
# no profile specified or profile is default,
# we should use __init__ the highest priority
config = pipeline.config._use('default', copy=True)
else:
config = pipeline.config._use(profile, 'default', copy=True)
self.properties_from_config(config)
self.workdir = Path(config.workdir) / slugify(self.name)
self.compute_properties()
await plugin.hooks.on_proc_property_computed(self)
# check if it's the same proc using the workdir
proc_name_file = self.workdir / 'proc.name'
if proc_name_file.is_file() and proc_name_file.read_text() != self.name:
raise ProcWorkdirConflictException(
'Workdir name is conflicting with process '
                f'{proc_name_file.read_text()!r}, use a different pipeline '
'or a different process name.'
)
self.workdir.mkdir(parents=True, exist_ok=True)
proc_name_file.write_text(self.name)
self.xqute = Xqute(
self.scheduler,
job_metadir=self.workdir,
job_submission_batch=config.submission_batch,
job_error_strategy=config.error_strategy,
job_num_retries=config.num_retries,
scheduler_forks=self.forks,
**self.scheduler_opts)
# for the plugin hooks to access
self.xqute.proc = self
# init all other properties and jobs
await self._init_jobs(config)
self.out_channel = DataFrame((job.output for job in self.jobs))
await plugin.hooks.on_proc_init(self)
def __repr__(self):
return f'<Proc-{hex(id(self))}({self.name}: {self.size})>'
@cached_property
def size(self) -> int:
"""The size of the process (# of jobs)"""
return len(self.jobs)
@cached_property
def succeeded(self) -> bool:
"""Check if the process is succeeded (all jobs succeeded)"""
return all(job.status == JobStatus.FINISHED for job in self.jobs)
async def run(self) -> None:
"""Run the process"""
self._print_banner()
self.log('info', 'Workdir: %r', str(self.workdir))
self._print_dependencies()
# init pbar
self.pbar = self.pipeline.pbar.proc_bar(self.size, self.name)
await plugin.hooks.on_proc_start(self)
cached_jobs = []
for job in self.jobs:
if await job.cached:
cached_jobs.append(job.index)
self.pbar.update_job_submitted()
self.pbar.update_job_running()
self.pbar.update_job_succeeded()
job.status = JobStatus.FINISHED
await self.xqute.put(job)
if cached_jobs:
self.log('info', 'Cached jobs: %s', brief_list(cached_jobs))
await self.xqute.run_until_complete()
self.pbar.done()
await plugin.hooks.on_proc_done(
self,
False if not self.succeeded
# pylint: disable=comparison-with-callable
else 'cached' if len(cached_jobs) == self.size
else True
)
async def _init_job(self, worker_id: int, config: Config) -> None:
"""A worker to initialize jobs
Args:
worker_id: The worker id
config: The pipeline configuration
"""
for job in self.jobs:
if job.index % config.submission_batch != worker_id:
continue
await job.prepare(self)
async def _init_jobs(self, config: Config) -> None:
"""Initialize all jobs
Args:
config: The pipeline configuration
"""
for i in range(self.input.data.shape[0]):
job = self.scheduler.job_class(i, '', self.workdir)
self.jobs.append(job)
await asyncio.gather(
*(self._init_job(i, config)
for i in range(config.submission_batch))
)
def _print_banner(self) -> None:
"""Print the banner of the process"""
console_width = get_console_width()
panel = Panel(
self.desc,
title=self.name,
box=box.Box(
"╭═┬╮\n"
"║ ║║\n"
"├═┼┤\n"
"║ ║║\n"
"├═┼┤\n"
"├═┼┤\n"
"║ ║║\n"
"╰═┴╯\n"
) if self.end else box.ROUNDED,
width=min(DEFAULT_CONSOLE_WIDTH, console_width)
)
logger.info('')
log_rich_renderable(panel, 'cyan', logger.info)
def _print_dependencies(self):
"""Print the dependencies"""
self.log('info',
'[yellow]<<<[/yellow] %s',
[proc.name for proc in self.requires]
if self.requires else '[START]')
self.log('info',
'[yellow]>>>[/yellow] %s',
[proc.name for proc in self.nexts]
if self.nexts else '[END]')
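# --- Illustrative usage sketch (added annotation, not part of pipen) -----
# A hedged example of declaring a process with the constructor arguments
# accepted above. The input, output and script values are invented, and
# actually executing the process requires a Pipen pipeline object, which
# is outside the scope of this module.
def _example_define_proc():
    p_sort = Proc(
        name='sort',
        desc='Sort an input file',
        input_keys='infile:file',
        input=['/tmp/a.txt'],
        output='outfile:file:sorted.txt',
        lang='bash',
        script='sort {{in.infile}} > {{out.outfile}}',
        forks=2,
    )
    return p_sort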
| apache-2.0 |
freephys/python_ase | ase/gui/bulk_modulus.py | 14 | 1044 | # -*- coding: utf-8 -*-
from math import sqrt
import numpy as np
from ase.units import kJ
from ase.utils.eos import EquationOfState
def BulkModulus(images):
v = np.array([abs(np.linalg.det(A)) for A in images.A])
#import matplotlib.pyplot as plt
import pylab as plt
plt.ion()
EquationOfState(v, images.E).plot()
"""
fit = np.poly1d(np.polyfit(v**-(1.0 / 3), images.E, 3))
fit1 = np.polyder(fit, 1)
fit2 = np.polyder(fit1, 1)
for t in np.roots(fit1):
if t > 0 and fit2(t) > 0:
break
v0 = t**-3
e0 = fit(t)
B = t**5 * fit2(t) / 9 / kJ * 1.0e24 # Gpa
import pylab
import matplotlib
#matplotlib.use('GTK')
pylab.ion()
x = 3.95
pylab.figure(figsize=(x * 2.5**0.5, x))
pylab.plot(v, images.E, 'o')
x = np.linspace(min(v), max(v), 100)
pylab.plot(x, fit(x**-(1.0 / 3)), '-r')
pylab.xlabel(u'volume [Å^3]')
pylab.ylabel(u'energy [eV]')
pylab.title(u'E: %.3f eV, V: %.3f Å^3, B: %.3f GPa' % (e0, v0, B))
pylab.show()
"""
| gpl-3.0 |
stefantkeller/VECSELsetup | exp/eval/calibration/calib_highpower_pump.py | 1 | 8078 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import errorvalues as ev # github.com/stefantkeller/errorvalues
from VECSELsetup.eval.varycolor import varycolor
from VECSELsetup.eval.gen_functions import extract
#from sys import exit
def calibrate_pump(logfile1,logfile2,calibfile_pp,calibplot_pp,calibfile_cp,calibplot_cp):
# logfile0 w/ directly attached P-I measurement
# here we assume the P-I relation to be constant
# logfile1 w/ P-I measurement after BS
# (logfile0 and 1 give BS-fraction assumed to be constant)
# logfile2 w/ "emission" measurement for whole I-range
# from 2 find P-I relation, transform I into P with results from 1
# this gives a P-P relation for pump.
#
# drop 0-measurement and work with P-I relation from logfile1
#------------------------------------
#current_set,current,pump,reflection,emission,absorption = extract(logfile,identifiers=['Current','Pump','Refl','Laser'])
current_set1, current1, thermal1 = extract(logfile1, identifiers=['Current','Laser'])
current_set2, current2, pump2 = extract(logfile2, identifiers=['Current','Pump'])
T1, T2 = current1.keys()[0], current2.keys()[0] # because the protocol writes HS temperature that we're not interested in during calibration (lazy.)
#------------------------------------
cols = varycolor(3*len(current1)) # 3 per temperature
# P-I (1)
xmin, xmax = ev.min(current1[T1],False), ev.max(current1[T1],False)
ymin, ymax = ev.min(thermal1[T1],False), ev.max(thermal1[T1],False)
xlim = [xmin.v()-2*xmin.e(),xmax.v()+2*xmax.e()]
ylim = [ymin.v()-2*ymin.e(),ymax.v()+2*ymax.e()]
textx, texty = xmax.v()/4, ymax.v()*2/3
start_linreg_at = 6 #A
sind1 = sum(current1[T1].v()<start_linreg_at)
q1,m1 = ev.linreg(current1[T1].v()[sind1:],thermal1[T1].v()[sind1:],thermal1[T1].e()[sind1:])
if False:
plt.clf()
plt.subplot(1,1,1)
plt.errorbar(current1[T1].v(),thermal1[T1].v(),
xerr=current1[T1].e(),yerr=thermal1[T1].e(),
c=cols[0],linestyle=' ')
plt.plot(current1[T1].v(),m1.v()*current1[T1].v()+q1.v(),c=cols[1])
summary = r'(${0}$) $\times$ pump + (${1}$) W'.format(m1.round(2),q1.round(2))
plt.text(textx,texty, summary,color='k')
plt.title('P-I pos1 -- supposed to be linear relation!')
plt.xlabel('Pump current (A)')
plt.ylabel('Power seen by S314C (thermal PM) (W)')
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid('on')
plt.show()
# P-I (2)
xmin, xmax = ev.min(current2[T2],False), ev.max(current2[T2],False)
ymin, ymax = ev.min(pump2[T2],False), ev.max(pump2[T2],False)
xlim = [xmin.v()-2*xmin.e(),xmax.v()+2*xmax.e()]
ylim = [ymin.v()-2*ymin.e(),ymax.v()+2*ymax.e()]
textx, texty = xmax.v()/4, ymax.v()*2/3
start_linreg_at = 6 #A
sind2 = sum(current2[T2].v()<start_linreg_at)
q2,m2 = ev.linreg(current2[T2].v()[sind2:],pump2[T2].v()[sind2:],pump2[T2].e()[sind2:])
if False:
plt.clf()
plt.subplot(1,1,1)
plt.errorbar(current2[T2].v(),pump2[T2].v(),
xerr=current2[T2].e(),yerr=pump2[T2].e(),
c=cols[0],linestyle=' ')
plt.plot(current2[T2].v(),m2.v()*current2[T2].v()+q2.v(),c=cols[1])
summary = r'(${0}$) $\times$ pump + (${1}$) W'.format(m2.round(2),q2.round(2))
plt.text(textx,texty, summary,color='k')
plt.title('P-I pump')
plt.xlabel('Pump current (A)')
plt.ylabel('Power seen by S121C (photodiode) (W)')
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid('on')
plt.show()
# ----------------------------------
# with m1,q1 scale current from 2
plt.clf()
plt.subplot(1,1,1)
pump_ = ev.errvallist([ev.max(ev.errvallist([0,p]),False) for p in current2[T2]*m1+q1]) # ignore values below pump threshold
nzeros = np.sum(pump_.v()==0)
xmin, xmax = ev.min(pump2[T2],False), ev.max(pump2[T2],False)
ymin, ymax = ev.min(pump_,False), ev.max(pump_,False)
xlim = [xmin.v()-2*xmin.e(),xmax.v()+2*xmax.e()]
ylim = [ymin.v()-2*ymin.e(),ymax.v()+2*ymax.e()]
textx, texty = xmax.v()/4, ymax.v()*2/3
q3,m3 = ev.linreg(pump2[T2].v()[nzeros:],pump_.v()[nzeros:],pump_.e()[nzeros:])
plt.errorbar(pump2[T2].v(),pump_.v(),
xerr=pump2[T2].e(),yerr=pump_.e(),
c=cols[0],linestyle=' ')
plt.plot(pump2[T2].v(),m3.v()*pump2[T2].v()+q3.v(),c=cols[1])
summary = r'(${0}$) $\times$ pump + (${1}$) W'.format(m3.round(2),q3.round(2))
plt.text(textx,texty, summary,color='k')
plt.title('Pump')
plt.xlabel('Power seen by S121C (photodiode) (W)')
plt.ylabel('Power at sample (W)')
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid('on')
#plt.show()
plt.savefig(calibplot_pp)
#
#
# P-I (scaled)
plt.clf()
plt.subplot(1,1,1)
xmin, xmax = ev.min(current2[T2],False), ev.max(current2[T2],False)
ymin, ymax = ev.min(pump_,False), ev.max(pump_,False)
xlim = [xmin.v()-2*xmin.e(),xmax.v()+2*xmax.e()]
ylim = [ymin.v()-2*ymin.e(),ymax.v()+2*ymax.e()]
textx, texty = xmax.v()/4, ymax.v()*2/3
start_linreg_at = 20 #A
#end_linreg_at = ..
sind = sum(current2[T2].v()<start_linreg_at)
#eind = sum(current[Tc].v()<end_linreg_at)
# linreg
q4,m4 = ev.linreg(current2[T2].v()[sind:],pump_.v()[sind:],pump_.e()[sind:])
# plot
plt.errorbar(current2[T2].v(),pump_.v(),
xerr=current2[T2].e(),yerr=pump_.e(),
c=cols[0],linestyle=' ')
plt.plot(current2[T2].v(),m4.v()*current2[T2].v()+q4.v(),c=cols[1])
    summary = r'(${0}$) $\times$ current + (${1}$) W'.format(m4.round(2),q4.round(2))
plt.text(textx,texty, summary,color='k')
plt.title('Pump (wrt current)')
plt.xlabel('Achieved current setting (A)')
plt.ylabel('Power at sample (W)')
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid('on')
#plt.show()
plt.savefig(calibplot_cp)
#exit()
#------------------------------------
# write file with evaluation as LUT
with open(calibfile_pp,'wb') as pf:
pf.write(u'columns=PM_pump(W),PM_pump_sterr(W),PM_reference(W),PM_reference_sterr(W);linreg={0}*p+{1}\n'.format(ev.errval(m3,printout='cp!'),ev.errval(q3,printout='cp!')))
for entry in range(len(pump_)):
pf.write(u'{0},{1},{2},{3}\n'.format( pump2[T2].v()[entry],pump2[T2].e()[entry],
pump_.v()[entry],pump_.e()[entry] ))
with open(calibfile_cp,'wb') as cf:
cf.write(u'columns=current(A),current_sterr(A),PM_reference(W),PM_reference_sterr(W);linreg={0}*c+{1}\n'.format(ev.errval(m4,printout='cp!'),ev.errval(q4,printout='cp!')))
for entry in range(len(current2[T2])):
cf.write(u'{0},{1},{2},{3}\n'.format( current2[T2].v()[entry],current2[T2].e()[entry],
pump_.v()[entry],pump_.e()[entry] ))
print u'Pump calibration finished:\n{0}\n{1}\n{2}\n{3}'.format(calibfile_pp,calibplot_pp,calibfile_cp,calibplot_cp)
def main():
logfile1 = '20150112_calib/1_pump_calib.csv' # thermal PM at sample pos
logfile2 = '20150112_calib/2_refl_calib.csv'
rootpath = '/'.join(logfile1.split('/')[:-1])
lut_folder = '/LUTs'
calibfile_pp = rootpath+lut_folder+'/calib_pump.csv'
calibplot_pp = rootpath+lut_folder+'/calib_pump.png'
calibfile_cp = rootpath+lut_folder+'/calib_pump_current.csv'
calibplot_cp = rootpath+lut_folder+'/calib_pump_current.png'
#logfile1 = '../1_pump_calib.csv' # from here: take 'Current', 'Pump', and 'Laser'
calibrate_pump(logfile1,logfile2,calibfile_pp,calibplot_pp,calibfile_cp,calibplot_cp)
if __name__ == "__main__":
main()
| mit |
herval/zeppelin | python/src/main/resources/python/zeppelin_python.py | 2 | 6911 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
import ast
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PythonCompletion:
def __init__(self, interpreter, userNameSpace):
self.interpreter = interpreter
self.userNameSpace = userNameSpace
def getObjectCompletion(self, text_value):
completions = [completion for completion in list(self.userNameSpace.keys()) if completion.startswith(text_value)]
builtinCompletions = [completion for completion in dir(__builtins__) if completion.startswith(text_value)]
return completions + builtinCompletions
def getMethodCompletion(self, objName, methodName):
execResult = locals()
try:
exec("{} = dir({})".format("objectDefList", objName), _zcUserQueryNameSpace, execResult)
except:
self.interpreter.logPythonOutput("Fail to run dir on " + objName)
self.interpreter.logPythonOutput(traceback.format_exc())
return None
else:
objectDefList = execResult['objectDefList']
return [completion for completion in execResult['objectDefList'] if completion.startswith(methodName)]
def getCompletion(self, text_value):
if text_value == None:
return None
dotPos = text_value.find(".")
if dotPos == -1:
objName = text_value
completionList = self.getObjectCompletion(objName)
else:
objName = text_value[:dotPos]
methodName = text_value[dotPos + 1:]
completionList = self.getMethodCompletion(objName, methodName)
if completionList is None or len(completionList) <= 0:
self.interpreter.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreter.setStatementsFinished(result, False)
host = sys.argv[1]
port = int(sys.argv[2])
if "PY4J_GATEWAY_SECRET" in os.environ:
from py4j.java_gateway import GatewayParameters
gateway_secret = os.environ["PY4J_GATEWAY_SECRET"]
gateway = JavaGateway(gateway_parameters=GatewayParameters(
address=host, port=port, auth_token=gateway_secret, auto_convert=True))
else:
gateway = JavaGateway(GatewayClient(address=host, port=port), auto_convert=True)
intp = gateway.entry_point
_zcUserQueryNameSpace = {}
completion = PythonCompletion(intp, _zcUserQueryNameSpace)
_zcUserQueryNameSpace["__zeppelin_completion__"] = completion
_zcUserQueryNameSpace["gateway"] = gateway
from zeppelin_context import PyZeppelinContext
if intp.getZeppelinContext():
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
intp.onPythonScriptInitialized(os.getpid())
# redirect stdout/stderr to java side so that PythonInterpreter can capture the python execution result
output = Logger()
sys.stdout = output
sys.stderr = output
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
isForCompletion = req.isForCompletion()
# Get post-execute hooks
try:
if req.isCallHooks():
global_hook = intp.getHook('post_exec_dev')
else:
global_hook = None
except:
global_hook = None
try:
if req.isCallHooks():
user_hook = __zeppelin__.getHook('post_exec')
else:
user_hook = None
except:
user_hook = None
nhooks = 0
if not isForCompletion:
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
if not isForCompletion:
# only call it when it is not for code completion. code completion will call it in
# PythonCompletion.getCompletion
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
if not isForCompletion:
# extract which line incur error from error message. e.g.
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# ZeroDivisionError: integer division or modulo by zero
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
idealabasu/code_pynamics | python/pynamics_examples/falling_rod.py | 1 | 3239 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import sympy
sympy.init_printing(pretty_print=False)
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant,Variable
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output,PointsOutput
from pynamics.particle import Particle
import pynamics.integration
import pynamics.tanh
import sympy
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
system = System()
pynamics.set_system(__name__,system)
tol = 1e-4
error_tol = 1e-10
m1 = Constant(1e0,'m1',system)
m2 = Constant(1e0,'m2',system)
l0 = Constant(1,'l0',system)
g = Constant(9.81,'g',system)
k_constraint = Constant(1e4,'k_constraint',system)
b_constraint = Constant(1e5,'b_constraint',system)
tinitial = 0
tfinal = 10
fps = 30
tstep = 1/fps
t = numpy.r_[tinitial:tfinal:tstep]
x1,x1_d,x1_dd = Differentiable('x1',system)
y1,y1_d,y1_dd = Differentiable('y1',system)
q1,q1_d,q1_dd = Differentiable('q1',system)
vini = 5
aini = -60*pi/180
initialvalues = {}
initialvalues[x1]=0
initialvalues[x1_d]=2
initialvalues[y1]=1
initialvalues[y1_d]=10
initialvalues[q1]=10*pi/180
initialvalues[q1_d]=-10
statevariables = system.get_state_variables()
ini = [initialvalues[item] for item in statevariables]
N = Frame('N')
system.set_newtonian(N)
A = Frame('A')
A.rotate_fixed_axis_directed(N,[0,0,1],q1)
pNA=0*N.x
pm1 = x1*N.x + y1*N.y
pm2 = pm1+l0*A.y
Particle1 = Particle(pm1,m1,'Particle1',system)
Particle2 = Particle(pm2,m2,'Particle2',system)
vpm1 = pm1.time_derivative(N,system)
vpm2 = pm2.time_derivative(N,system)
system.addforcegravity(-g*N.y)
y2 = pm2.dot(N.y)
f_floor2 = (y2**2)**.5 - y2
system.addforce(k_constraint*f_floor2*N.y,vpm2)
system.addforce(-b_constraint*f_floor2*vpm2,vpm2)
f_floor1 = (y1**2)**.5 - y1
system.addforce(k_constraint*f_floor1*N.y,vpm1)
system.addforce(-b_constraint*f_floor1*vpm1,vpm1)
eq = []
f,ma = system.getdynamics()
func = system.state_space_post_invert(f,ma)
constants = system.constant_values.copy()
# constants[b_constraint]=0
states=pynamics.integration.integrate_odeint(func,ini,t,rtol = tol, atol = tol, args=({'constants':constants},))
points = [pm1,pm2]
po = PointsOutput(points, system, constant_values=system.constant_values)
po.calc(states)
constants = system.constant_values.copy()
constants[b_constraint]=1e3
states=pynamics.integration.integrate_odeint(func,ini,t,rtol = tol, atol = tol, args=({'constants':constants},))
points = [pm1,pm2]
po2 = PointsOutput(points, system, constant_values=system.constant_values)
po2.calc(states)
constants[b_constraint]=0
states=pynamics.integration.integrate_odeint(func,ini,t,rtol = tol, atol = tol, args=({'constants':constants},))
points = [pm1,pm2]
po3 = PointsOutput(points, system, constant_values=system.constant_values)
po3.calc(states)
po.plot_time()
po.animate(fps = fps, movie_name='bouncy-mod.mp4',lw=2,marker='o')
po2.plot_time()
po2.animate(fps = fps, movie_name='bouncy-mod.mp4',lw=2,marker='o')
po3.plot_time()
po3.animate(fps = fps, movie_name='bouncy-mod.mp4',lw=2,marker='o')
| mit |
daodaoliang/neural-network-animation | matplotlib/tests/test_agg.py | 9 | 4670 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
from matplotlib.image import imread
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.testing.decorators import cleanup
from matplotlib import pyplot as plt
from matplotlib import collections
from matplotlib import path
@cleanup
def test_repeated_save_with_alpha():
# We want an image which has a background color of bluish green, with an
# alpha of 0.25.
fig = Figure([1, 0.4])
canvas = FigureCanvas(fig)
fig.set_facecolor((0, 1, 0.4))
fig.patch.set_alpha(0.25)
# The target color is fig.patch.get_facecolor()
buf = io.BytesIO()
fig.savefig(buf,
facecolor=fig.get_facecolor(),
edgecolor='none')
# Save the figure again to check that the
# colors don't bleed from the previous renderer.
buf.seek(0)
fig.savefig(buf,
facecolor=fig.get_facecolor(),
edgecolor='none')
# Check the first pixel has the desired color & alpha
# (approx: 0, 1.0, 0.4, 0.25)
buf.seek(0)
assert_array_almost_equal(tuple(imread(buf)[0, 0]),
(0.0, 1.0, 0.4, 0.250),
decimal=3)
@cleanup
def test_large_single_path_collection():
buff = io.BytesIO()
# Generates a too-large single path in a path collection that
# would cause a segfault if the draw_markers optimization is
# applied.
f, ax = plt.subplots()
collection = collections.PathCollection(
[path.Path([[-10, 5], [10, 5], [10, -5], [-10, -5], [-10, 5]])])
ax.add_artist(collection)
ax.set_xlim(10**-3, 1)
plt.savefig(buff)
def report_memory(i):
pid = os.getpid()
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
print(i, ' ', a2[1], end=' ')
return int(a2[1].split()[0])
# This test is disabled -- it uses old API. -ADS 2009-09-07
## def test_memleak():
## """Test agg backend for memory leaks."""
## from matplotlib.ft2font import FT2Font
## from numpy.random import rand
## from matplotlib.backend_bases import GraphicsContextBase
## from matplotlib.backends._backend_agg import RendererAgg
## fontname = '/usr/local/share/matplotlib/Vera.ttf'
## N = 200
## for i in range( N ):
## gc = GraphicsContextBase()
## gc.set_clip_rectangle( [20, 20, 20, 20] )
## o = RendererAgg( 400, 400, 72 )
## for j in range( 50 ):
## xs = [ 400*int(rand()) for k in range(8) ]
## ys = [ 400*int(rand()) for k in range(8) ]
## rgb = (1, 0, 0)
## pnts = zip( xs, ys )
## o.draw_polygon( gc, rgb, pnts )
## o.draw_polygon( gc, None, pnts )
## for j in range( 50 ):
## x = [ 400*int(rand()) for k in range(4) ]
## y = [ 400*int(rand()) for k in range(4) ]
## o.draw_lines( gc, x, y )
## for j in range( 50 ):
## args = [ 400*int(rand()) for k in range(4) ]
## rgb = (1, 0, 0)
## o.draw_rectangle( gc, rgb, *args )
## if 1: # add text
## font = FT2Font( fontname )
## font.clear()
## font.set_text( 'hi mom', 60 )
## font.set_size( 12, 72 )
## o.draw_text_image( font.get_image(), 30, 40, gc )
## fname = "agg_memleak_%05d.png"
## o.write_png( fname % i )
## val = report_memory( i )
## if i==1: start = val
## end = val
## avgMem = (end - start) / float(N)
## print 'Average memory consumed per loop: %1.4f\n' % (avgMem)
## #TODO: Verify the expected mem usage and approximate tolerance that
## # should be used
## #self.checkClose( 0.32, avgMem, absTol = 0.1 )
## # w/o text and w/o write_png: Average memory consumed per loop: 0.02
## # w/o text and w/ write_png : Average memory consumed per loop: 0.3400
## # w/ text and w/ write_png : Average memory consumed per loop: 0.32
@cleanup
def test_marker_with_nan():
# This creates a marker with nans in it, which was segfaulting the
# Agg backend (see #3722)
fig, ax = plt.subplots(1)
steps = 1000
data = np.arange(steps)
ax.semilogx(data)
ax.fill_between(data, data*0.8, data*1.2)
buf = io.BytesIO()
fig.savefig(buf, format='png')
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
detrout/debian-statsmodels | docs/sphinxext/numpy_ext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
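        # NOTE: the unconditional return above makes the branch below unreachable,
        # so signature rendering is effectively disabled for the Sphinx output.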
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
            # LaTeX collects all references into a separate bibliography,
            # so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
abad623/verbalucce | verbalucce/nltk/draw/dispersion.py | 17 | 1693 | # Natural Language Toolkit: Dispersion Plots
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A utility for displaying lexical dispersion.
"""
def dispersion_plot(text, words, ignore_case=False):
"""
Generate a lexical dispersion plot.
:param text: The source text
:type text: list(str) or enum(str)
:param words: The target words
:type words: list of str
:param ignore_case: flag to set if case should be ignored when searching text
:type ignore_case: bool
"""
try:
import pylab
except ImportError:
        raise ValueError('The plot function requires the matplotlib package (aka pylab). '
'See http://matplotlib.sourceforge.net/')
text = list(text)
words.reverse()
if ignore_case:
words_to_comp = map(str.lower, words)
text_to_comp = map(str.lower, text)
else:
words_to_comp = words
text_to_comp = text
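    # Every (text offset, word row) match becomes one point below; pylab draws
    # each point as a short vertical tick ("b|") on the matching word's row.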
points = [(x,y) for x in range(len(text_to_comp))
for y in range(len(words_to_comp))
if text_to_comp[x] == words_to_comp[y]]
if points:
x, y = zip(*points)
else:
x = y = ()
pylab.plot(x, y, "b|", scalex=.1)
pylab.yticks(range(len(words)), words, color="b")
pylab.ylim(-1, len(words))
pylab.title("Lexical Dispersion Plot")
pylab.xlabel("Word Offset")
pylab.show()
if __name__ == '__main__':
from nltk.corpus import gutenberg
words = ['Elinor', 'Marianne', 'Edward', 'Willoughby']
dispersion_plot(gutenberg.words('austen-sense.txt'), words)
| apache-2.0 |
arthur-gouveia/DAT210x | Module5/assignment5.py | 1 | 5322 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
matplotlib.style.use('ggplot') # Look Pretty
def plotDecisionBoundary(model, X, y):
fig = plt.figure()
# ax = fig.add_subplot(111)
padding = 0.6
resolution = 0.0025
colors = ['royalblue', 'forestgreen', 'ghostwhite']
    # Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
    # Create a 2D grid matrix. The values stored in the matrix
    # are the class predictions at each grid location
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
# cs =
plt.contourf(xx, yy, Z, cmap=plt.cm.terrain)
    # Plot the original test points as well...
for label in range(len(np.unique(y))):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label],
label=str(label), alpha=0.8)
p = model.get_params()
plt.axis('tight')
plt.title('K = ' + str(p['n_neighbors']))
#
# XXX: Load up the dataset into a variable called X. Check the .head and
# compare it to the file you loaded in a text editor. Make sure you're
# loading your data properly--don't fail on the 1st step!
#
# .. your code here ..
X = pd.read_csv('Datasets/wheat.data', index_col=0)
#
# XXX: Copy the 'wheat_type' series slice out of X, and into a series
# called 'y'. Then drop the original 'wheat_type' column from the X
#
# .. your code here ..
y = X.wheat_type.copy()
X = X.drop('wheat_type', axis=1)
# XXX: Do a quick, "ordinal" conversion of 'y'. In actuality our
# classification isn't ordinal, but just as an experiment...
#
# .. your code here ..
y = y.astype('category').cat.codes
#
# XXX: Basic nan munging. Fill each row's nans with the mean of the feature
#
# .. your code here ..
X.fillna(X.mean(), inplace=True)
#
# XXX: Split X into training and testing data sets using train_test_split().
# INFO: Use 0.33 test size, and use random_state=1. This is important
# so that your answers are verifiable. In the real world, you wouldn't
# specify a random_state.
#
# .. your code here ..
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,
random_state=1)
#
# XXX: Create an instance of SKLearn's Normalizer class and then train it
# using its .fit() method against your *training* data.
#
# NOTE: The reason you only fit against your training data is because in a
# real-world situation, you'll only have your training data to train with!
# In this lab setting, you have both train+test data; but in the wild,
# you'll only have your training data, and then unlabeled data you want to
# apply your models to.
#
# .. your code here ..
normalizer = Normalizer().fit(X_train)
#
# XXX: With your trained pre-processor, transform both your training AND
# testing data.
#
# NOTE: Any testing data has to be transformed with your preprocessor
# that has been fit against your training data, so that it exists in the same
# feature-space as the original data used to train your models.
#
# .. your code here ..
X_train = normalizer.transform(X_train)
X_test = normalizer.transform(X_test)
#
# XXX: Just like your preprocessing transformation, create a PCA
# transformation as well. Fit it against your training data, and then
# project your training and testing features into PCA space using the
# PCA model's .transform() method.
#
# NOTE: This has to be done because the only way to visualize the decision
# boundary in 2D would be if your KNN algo ran in 2D as well:
#
# .. your code here ..
pca = PCA(2).fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
#
# XXX: Create and train a KNeighborsClassifier. Start with K=9 neighbors.
# NOTE: Be sure to train your classifier against the pre-processed, PCA-
# transformed training data above! You do not, of course, need to transform
# your labels.
#
# .. your code here ..
knn = KNeighborsClassifier(n_neighbors=9).fit(X_train, y_train)
# HINT: Ensure your KNeighbors classifier object from earlier is called 'knn'
plotDecisionBoundary(knn, X_train, y_train)
# ------------------------------------
#
# XXX: Display the accuracy score of your test data/labels, computed by
# your KNeighbors model.
#
# NOTE: You do NOT have to run .predict before calling .score, since
# .score will take care of running your predictions for you automatically.
#
# .. your code here ..
print(knn.score(X_test, y_test))
#
# BONUS: Instead of the ordinal conversion, try and get this assignment
# working with a proper Pandas get_dummies for feature encoding. HINT:
# You might have to update some of the plotDecisionBoundary code.
plt.show()
| mit |
TTXSJie/syn_icb | gan_toy.py | 2 | 7739 | import os, sys
sys.path.append(os.getcwd())
import random
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sklearn.datasets
import tflib as lib
import tflib.ops.linear
import tflib.plot
MODE = 'wgan-gp' # wgan or wgan-gp
DATASET = '8gaussians' # 8gaussians, 25gaussians, swissroll
DIM = 512 # Model dimensionality
FIXED_GENERATOR = False # whether to hold the generator fixed at real data plus
# Gaussian noise, as in the plots in the paper
LAMBDA = .1 # Smaller lambda makes things faster for toy tasks, but isn't
# necessary if you increase CRITIC_ITERS enough
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 256 # Batch size
ITERS = 100000 # how many generator iterations to train for
lib.print_model_settings(locals().copy())
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(
name+'.Linear',
n_in,
n_out,
inputs,
initialization='he'
)
output = tf.nn.relu(output)
return output
def Generator(n_samples, real_data):
if FIXED_GENERATOR:
return real_data + (1.*tf.random_normal(tf.shape(real_data)))
else:
noise = tf.random_normal([n_samples, 2])
output = ReLULayer('Generator.1', 2, DIM, noise)
output = ReLULayer('Generator.2', DIM, DIM, output)
output = ReLULayer('Generator.3', DIM, DIM, output)
output = lib.ops.linear.Linear('Generator.4', DIM, 2, output)
return output
def Discriminator(inputs):
output = ReLULayer('Discriminator.1', 2, DIM, inputs)
output = ReLULayer('Discriminator.2', DIM, DIM, output)
output = ReLULayer('Discriminator.3', DIM, DIM, output)
output = lib.ops.linear.Linear('Discriminator.4', DIM, 1, output)
return tf.reshape(output, [-1])
real_data = tf.placeholder(tf.float32, shape=[None, 2])
fake_data = Generator(BATCH_SIZE, real_data)
disc_real = Discriminator(real_data)
disc_fake = Discriminator(fake_data)
# WGAN loss
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_cost = -tf.reduce_mean(disc_fake)
# WGAN gradient penalty
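# The penalty samples points uniformly along straight lines between real/fake
# pairs and pushes the critic's gradient norm at those points towards 1, i.e.
# it adds LAMBDA * E[(||grad D(x_hat)||_2 - 1)^2] to the critic loss.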
if MODE == 'wgan-gp':
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
interpolates = alpha*real_data + ((1-alpha)*fake_data)
disc_interpolates = Discriminator(interpolates)
gradients = tf.gradients(disc_interpolates, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1)**2)
disc_cost += LAMBDA*gradient_penalty
disc_params = lib.params_with_name('Discriminator')
gen_params = lib.params_with_name('Generator')
if MODE == 'wgan-gp':
disc_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(
disc_cost,
var_list=disc_params
)
if len(gen_params) > 0:
gen_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(
gen_cost,
var_list=gen_params
)
else:
gen_train_op = tf.no_op()
else:
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
disc_cost,
var_list=disc_params
)
if len(gen_params) > 0:
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
gen_cost,
var_list=gen_params
)
else:
gen_train_op = tf.no_op()
# Build an op to do the weight clipping
clip_ops = []
for var in disc_params:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights = tf.group(*clip_ops)
print "Generator params:"
for var in lib.params_with_name('Generator'):
print "\t{}\t{}".format(var.name, var.get_shape())
print "Discriminator params:"
for var in lib.params_with_name('Discriminator'):
print "\t{}\t{}".format(var.name, var.get_shape())
frame_index = [0]
def generate_image(true_dist):
"""
Generates and saves a plot of the true distribution, the generator, and the
critic.
"""
N_POINTS = 128
RANGE = 3
points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')
points[:,:,0] = np.linspace(-RANGE, RANGE, N_POINTS)[:,None]
points[:,:,1] = np.linspace(-RANGE, RANGE, N_POINTS)[None,:]
points = points.reshape((-1,2))
samples, disc_map = session.run(
[fake_data, disc_real],
feed_dict={real_data:points}
)
disc_map = session.run(disc_real, feed_dict={real_data:points})
plt.clf()
x = y = np.linspace(-RANGE, RANGE, N_POINTS)
plt.contour(x,y,disc_map.reshape((len(x), len(y))).transpose())
plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+')
plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+')
plt.savefig('frame'+str(frame_index[0])+'.jpg')
frame_index[0] += 1
# Dataset iterator
def inf_train_gen():
if DATASET == '25gaussians':
dataset = []
for i in xrange(100000/25):
for x in xrange(-2, 3):
for y in xrange(-2, 3):
point = np.random.randn(2)*0.05
point[0] += 2*x
point[1] += 2*y
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
np.random.shuffle(dataset)
dataset /= 2.828 # stdev
while True:
for i in xrange(len(dataset)/BATCH_SIZE):
yield dataset[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
elif DATASET == 'swissroll':
while True:
data = sklearn.datasets.make_swiss_roll(
n_samples=BATCH_SIZE,
noise=0.25
)[0]
data = data.astype('float32')[:, [0, 2]]
data /= 7.5 # stdev plus a little
yield data
elif DATASET == '8gaussians':
scale = 2.
centers = [
(1,0),
(-1,0),
(0,1),
(0,-1),
(1./np.sqrt(2), 1./np.sqrt(2)),
(1./np.sqrt(2), -1./np.sqrt(2)),
(-1./np.sqrt(2), 1./np.sqrt(2)),
(-1./np.sqrt(2), -1./np.sqrt(2))
]
centers = [(scale*x,scale*y) for x,y in centers]
while True:
dataset = []
for i in xrange(BATCH_SIZE):
point = np.random.randn(2)*.02
center = random.choice(centers)
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
dataset /= 1.414 # stdev
yield dataset
# Train loop!
with tf.Session() as session:
session.run(tf.initialize_all_variables())
gen = inf_train_gen()
for iteration in xrange(ITERS):
# Train generator
if iteration > 0:
_ = session.run(gen_train_op)
# Train critic
for i in xrange(CRITIC_ITERS):
_data = gen.next()
_disc_cost, _ = session.run(
[disc_cost, disc_train_op],
feed_dict={real_data: _data}
)
if MODE == 'wgan':
_ = session.run([clip_disc_weights])
# Write logs and save samples
lib.plot.plot('disc cost', _disc_cost)
if iteration % 100 == 99:
lib.plot.flush()
generate_image(_data)
lib.plot.tick()
| mit |
EuroPython/ep-tools | eptools/docstamp_utils.py | 1 | 1868 |
"""
Helper functions for working with docstamp.
"""
import os
import os.path as path
from docstamp.file_utils import get_extension, cleanup_docstamp_output
from docstamp.template import XeLateXDocument
def xelatex_document(doc_args, template_file, field_name, output_dir="."):
""" Use docstamp to use xelatex to produce a XeLateX document
using `template_file`.
The output file will be saved in output_dir and its path returned.
The doc_args keys will be lower case and have the white spaces replaced by '_'.
Parameters
----------
doc_args: dict or pandas.DataFrame
A dictionary with the argument values to fill the `template_file`.
template_file: str
Path to the .tex template file.
field_name: str
Name of the field in `doc_args` that will be used for the
output file name and some checks.
output_dir: str
Path to the output folder.
Returns
-------
output_path
"""
# input data
def process_data_key(key):
return key.lower().replace(" ", "_").replace("(", "").replace(")", "")
input_data = {process_data_key(key): value for key, value in doc_args.items()}
# template doc
template_doc = XeLateXDocument(template_file)
# output file name
field_val = input_data[process_data_key(field_name)].replace(" ", "")
file_extension = get_extension(template_file)
basename = path.basename(template_file).replace(file_extension, "")
file_name = basename + "_" + field_val
file_path = path.join(output_dir, file_name + ".pdf")
# make output folder
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# fill the template
template_doc.fill(input_data)
# save into PDF
template_doc.render(file_path)
# clean up LateX mess
cleanup_docstamp_output(output_dir)
return file_path
| mit |
IshankGulati/scikit-learn | doc/conf.py | 22 | 9789 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
evenmarbles/mlpy | mlpy/planners/discrete.py | 1 | 12392 | from __future__ import division, print_function, absolute_import
import math
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from . import IPlanner
from .explorers.discrete import DiscreteExplorer
from ..tools.log import LoggingMgr
from ..tools.misc import Waiting
class ValueIteration(IPlanner):
""" Planning through value Iteration.
Parameters
----------
model : DiscreteModel
The Markov decision model.
explorer : Explorer, optional
The exploration strategy to employ. Available explorers are:
:class:`.EGreedyExplorer`
With :math:`\\epsilon` probability, a random action is
chosen, otherwise the action resulting in the highest
q-value is selected.
:class:`.SoftmaxExplorer`
The softmax explorer varies the action probability as a
graded function of estimated value. The greedy action is
still given the highest selection probability, but all the others
are ranked and weighted according to their value estimates.
By default no explorer is used and the greedy action is chosen.
gamma : float, optional
The discount factor. Default is 0.9.
ignore_unreachable : bool, optional
Whether to ignore unreachable states or not. Unreachability is determined
by how many steps a state is are away from the closest neighboring state.
Default is False.
Raises
------
AttributeError
If both the Markov model and the planner define an explorer.
Only one explorer can be specified.
"""
MAX_STEPS = 100
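    # Maximum number of transitions a state may lie away from an observed state
    # before planning treats it as unreachable (used when ignore_unreachable is set).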
@property
def model(self):
""" The Markov decision process model.
        The Markov decision process model contains information about
the states, actions, and their transitions and the reward
function.
Returns
-------
IMDPModel :
The model.
"""
return self._model
def __init__(self, model, explorer=None, gamma=None, ignore_unreachable=False):
super(ValueIteration, self).__init__(explorer)
self._logger = LoggingMgr().get_logger(self._mid)
self._plot_num = 0
self._model = model
""":type: IMDPModel"""
if self._explorer is not None:
variable = getattr(self._model, "_explorer", None)
if variable is not None:
raise AttributeError("There can be only one explorer. Either based on the model or of the planner.")
if self._explorer is None:
self._explorer = DiscreteExplorer()
self._gamma = 0.9 if gamma is None else gamma
self._ignore_unreachable = ignore_unreachable if ignore_unreachable is not None else False
def __getstate__(self):
data = super(ValueIteration, self).__getstate__()
data.update({
'_model': self._model,
'_explorer': self._explorer,
'_gamma': self._gamma,
'_ignore_unreachable': self._ignore_unreachable,
'_plot_num': self._plot_num
})
return data
def __setstate__(self, d):
super(ValueIteration, self).__setstate__(d)
for name, value in d.iteritems():
setattr(self, name, value)
self._logger = LoggingMgr().get_logger(self._mid)
def activate_exploration(self):
"""Turn the explorer on."""
super(ValueIteration, self).activate_exploration()
func = getattr(self._model, "activate_exploration", None)
if callable(func):
func()
def deactivate_exploration(self):
"""Turn the explorer off."""
super(ValueIteration, self).deactivate_exploration()
func = getattr(self._model, "deactivate_exploration", None)
if callable(func):
func()
def get_best_action(self, state):
"""Choose the best next action for the agent to take.
Parameters
----------
state : State
            The state for which to choose the action.
Returns
-------
Action :
The best action.
"""
self._model.add_state(state)
actions = self._model.get_actions(state)
info = self._model.statespace[state]
action = self._explorer.choose_action(actions, [info.q[a] for a in actions])
self._logger.debug("state=%s\tact=%s\tvalue=%.2f", state, action, self._model.statespace[state].q[action])
return action
def plan(self):
"""Plan for the optimal policy.
Perform value iteration and build the Q-table.
"""
if self._ignore_unreachable:
self._calculate_reachable_states()
nloops = 0
max_error = 5000
min_error = 0.1
states_updated = 0
waiting = None
if self._logger.level > LoggingMgr.LOG_DEBUG:
waiting = Waiting("Perform value iteration")
waiting.start()
s0 = datetime.now()
while max_error > min_error:
self._logger.debug("max error: %0.5f nloops: %d", max_error, nloops)
max_error = 0
nloops += 1
for state in self._model.statespace.keys():
info = self._model.statespace[state]
self._logger.debug("\tState: id: %d: %s, Steps: %d", info.id, state, info.steps_away)
states_updated += 1
if self._ignore_unreachable and info.steps_away > 99999:
self._logger.debug("\tState not reachable, ignoring")
continue
for action, mdl in info.models.iteritems():
newq = mdl.reward_func.get(state)
for state2, prob in mdl.transition_proba:
self._logger.debug("\t\tNext state is: %s, prob: %.2f", state2, prob)
real_state = state2.is_valid()
next_state = state2
if not real_state:
next_state = state
elif self._ignore_unreachable and info.steps_away >= ValueIteration.MAX_STEPS:
next_state = state
else:
self._model.add_state(next_state)
info2 = self._model.statespace[next_state]
next_steps = info.steps_away + 1
if next_steps < info2.steps_away:
info2.steps_away = next_steps
maxq = max([info2.q[a] for a in self._model.get_actions(state2)])
newq += self._gamma * prob * maxq
tderror = math.fabs(info.q[action] - newq)
info.q[action] = newq
if tderror > max_error:
max_error = tderror
self._logger.debug("\t\tTD error: %.5f Max error: %.5f", tderror, max_error)
s1 = datetime.now()
delta = s1 - s0
if waiting is not None:
waiting.stop()
self._logger.info("\tvalues computed with maxError: %.5f nloops: %d time: %d:%d states: %d", max_error, nloops,
delta.seconds, delta.microseconds, states_updated)
self._remove_unreachable_states()
# noinspection PyShadowingNames
def visualize(self):
"""Visualize of the planning data.
The results in the Q table are visualized via a heat map.
"""
nrows = 30
actions = self._model.get_actions()
ncols = len(actions)
num_states = len(self._model.statespace)
data = np.zeros((num_states, len(actions)))
ylabels = [None] * num_states
for state, info in self._model.statespace.iteritems():
ylabels[info.id - 1] = state # TODO: check if that is correct: .encode()
for i, act in enumerate(actions):
data[info.id - 1][i] = info.q[act]
decorated = [(i, tup[0], tup) for i, tup in enumerate(ylabels)]
decorated.sort(key=lambda tup: tup[1])
ylabels = [tup for i, second, tup in decorated]
indices = [i for i, second, tup in decorated]
data = np.array([data[i] for i in indices])
self._logger.debug("Q-table data".format(data[::-1]))
h, w = data.shape
nsubplots = int(math.ceil(h / float(nrows)))
diff = (nsubplots * nrows) - h
# noinspection PyTypeChecker
data = np.lib.pad(data, ((0, diff), (0, 0)), 'constant', constant_values=0)
# noinspection PyTypeChecker
ylabels.extend([""] * diff)
h, w = data.shape
# noinspection PyArgumentList,PyTypeChecker
sdata = (data.reshape(h // nrows, nrows, -1, ncols)
.swapaxes(1, 2)
.reshape(-1, nrows, ncols))
dt = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
with PdfPages("savedata/figures/plot {0}.pdf".format(dt)) as pdf:
fig, axes = plt.subplots(1, nsubplots, figsize=(10, 7), tight_layout=True)
if nsubplots > 1:
for i, ax in enumerate(axes.flat):
self._add_subplot(fig, ax, sdata[i], ylabels[i * nrows:i * nrows + nrows])
else:
self._add_subplot(fig, axes, sdata[0], ylabels[0:nrows])
fig.subplots_adjust(right=1.2, top=0.2)
fig.suptitle("Plot #{0}".format(self._plot_num + 1), fontsize=10)
self._plot_num += 1
pdf.savefig()
plt.close()
def _create_policy(self, func=None):
"""Creates a policy (i.e., a state-action association).
Parameters
----------
func : callable, optional
A callback function for mixing policies.
"""
policy = {}
# noinspection PyUnresolvedReferences
if func and self._history and len(self._history.itervalues().next()) >= 2:
lmda = np.cumsum(func(), dtype=float)
for state, info in self._model.statespace.iteritems():
idx = np.argmax(lmda > np.random.random())
policy[state] = [self._history[state][idx]]
else:
for state, info in self._model.statespace.iteritems():
policy[state] = [self.get_best_action(state)]
return policy
def _calculate_reachable_states(self):
"""Identify the reachable states."""
for state, info in self._model.statespace.iteritems():
info.steps_away = 100000
for mdl in info.models.values():
if mdl.visits > 0:
info.steps_away = 0
break
def _remove_unreachable_states(self):
"""Remove unreachable states."""
if False and self._ignore_unreachable:
for state in self._model.statespace.keys():
info = self._model.statespace[state]
if info.steps_away > ValueIteration.MAX_STEPS:
self._model.statespace.pop(state, None)
# noinspection PyMethodMayBeStatic
def _add_subplot(self, fig, ax, data, ylabels):
"""Add a subplot."""
h, w = data.shape
# noinspection PyUnresolvedReferences
heatmap = ax.pcolormesh(data,
edgecolors='w', # put white lines between squares in heatmap
cmap=plt.cm.Blues)
ax.autoscale(tight=True) # get rid of whitespace in margins of heatmap
ax.set_aspect('equal') # ensure heatmap cells are square
ax.tick_params(bottom='on', top='off', left='on', right='off') # turn off ticks
ax.set_yticks(np.arange(h) + 0.5)
ax.set_yticklabels(np.arange(1, h + 1), size=7)
ax.set_xticks(np.arange(w) + 0.5)
ax.set_xticklabels(np.arange(1, w + 1), size=7)
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", "20%", pad="15%")
cbar = fig.colorbar(heatmap, cax=cax)
cbar.ax.tick_params(labelsize=7)
# Set the labels
ax.set_xticklabels(self._model.get_actions(), minor=False, rotation=90)
ax.set_yticklabels(ylabels, minor=False)
| mit |
gfyoung/pandas | pandas/tests/window/test_numba.py | 1 | 9535 | import numpy as np
import pytest
from pandas.errors import NumbaUtilError
import pandas.util._test_decorators as td
from pandas import DataFrame, Series, option_context
import pandas._testing as tm
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
@td.skip_if_no("numba", "0.46.0")
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestEngine:
@pytest.mark.parametrize("jit", [True, False])
def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center):
def f(x, *args):
arg_sum = 0
for arg in args:
arg_sum += arg
return np.mean(x) + arg_sum
if jit:
import numba
f = numba.jit(f)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
args = (2,)
s = Series(range(10))
result = s.rolling(2, center=center).apply(
f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = s.rolling(2, center=center).apply(
f, engine="cython", args=args, raw=True
)
tm.assert_series_equal(result, expected)
def test_numba_vs_cython_rolling_methods(
self, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(5))
roll = df.rolling(2)
result = getattr(roll, method)(engine="numba", engine_kwargs=engine_kwargs)
expected = getattr(roll, method)(engine="cython")
# Check the cache
assert (getattr(np, f"nan{method}"), "Rolling_apply_single") in NUMBA_FUNC_CACHE
tm.assert_frame_equal(result, expected)
def test_numba_vs_cython_expanding_methods(
self, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(5))
expand = df.expanding()
result = getattr(expand, method)(engine="numba", engine_kwargs=engine_kwargs)
expected = getattr(expand, method)(engine="cython")
# Check the cache
assert (
getattr(np, f"nan{method}"),
"Expanding_apply_single",
) in NUMBA_FUNC_CACHE
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("jit", [True, False])
def test_cache_apply(self, jit, nogil, parallel, nopython):
# Test that the functions are cached correctly if we switch functions
def func_1(x):
return np.mean(x) + 4
def func_2(x):
return np.std(x) * 5
if jit:
import numba
func_1 = numba.jit(func_1)
func_2 = numba.jit(func_2)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
roll = Series(range(10)).rolling(2)
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
# func_1 should be in the cache now
assert (func_1, "Rolling_apply_single") in NUMBA_FUNC_CACHE
result = roll.apply(
func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_2, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
# This run should use the cached func_1
result = roll.apply(
func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True
)
expected = roll.apply(func_1, engine="cython", raw=True)
tm.assert_series_equal(result, expected)
@td.skip_if_no("numba", "0.46.0")
class TestGroupbyEWMMean:
def test_invalid_engine(self):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
with pytest.raises(ValueError, match="engine must be either"):
df.groupby("A").ewm(com=1.0).mean(engine="foo")
def test_invalid_engine_kwargs(self):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
with pytest.raises(ValueError, match="cython engine does not"):
df.groupby("A").ewm(com=1.0).mean(
engine="cython", engine_kwargs={"nopython": True}
)
def test_cython_vs_numba(self, nogil, parallel, nopython, ignore_na, adjust):
df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})
gb_ewm = df.groupby("A").ewm(com=1.0, adjust=adjust, ignore_na=ignore_na)
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
result = gb_ewm.mean(engine="numba", engine_kwargs=engine_kwargs)
expected = gb_ewm.mean(engine="cython")
tm.assert_frame_equal(result, expected)
@td.skip_if_no("numba", "0.46.0")
def test_use_global_config():
def f(x):
return np.mean(x) + 2
s = Series(range(10))
with option_context("compute.use_numba", True):
result = s.rolling(2).apply(f, engine=None, raw=True)
expected = s.rolling(2).apply(f, engine="numba", raw=True)
tm.assert_series_equal(expected, result)
@td.skip_if_no("numba", "0.46.0")
def test_invalid_kwargs_nopython():
with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"):
Series(range(1)).rolling(1).apply(
lambda x: x, kwargs={"a": 1}, engine="numba", raw=True
)
@td.skip_if_no("numba", "0.46.0")
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:\\nThe keyword argument")
# Filter warnings when parallel=True and the function can't be parallelized by Numba
class TestTableMethod:
def test_table_series_valueerror(self):
def f(x):
return np.sum(x, axis=0) + 1
with pytest.raises(
ValueError, match="method='table' not applicable for Series objects."
):
Series(range(1)).rolling(1, method="table").apply(
f, engine="numba", raw=True
)
def test_table_method_rolling_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(3))
result = getattr(
df.rolling(2, method="table", axis=axis, min_periods=0), method
)(engine_kwargs=engine_kwargs, engine="numba")
expected = getattr(
df.rolling(2, method="single", axis=axis, min_periods=0), method
)(engine_kwargs=engine_kwargs, engine="numba")
tm.assert_frame_equal(result, expected)
def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
def f(x):
return np.sum(x, axis=0) + 1
df = DataFrame(np.eye(3))
result = df.rolling(2, method="table", axis=axis, min_periods=0).apply(
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
expected = df.rolling(2, method="single", axis=axis, min_periods=0).apply(
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
def test_table_method_rolling_weighted_mean(self):
def weighted_mean(x):
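            # x is the entire 2-D window at once (method="table"): average the
            # first two columns using the weights in the third column, keep 2-D output.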
arr = np.ones((1, x.shape[1]))
arr[:, :2] = (x[:, :2] * x[:, 2]).sum(axis=0) / x[:, 2].sum()
return arr
df = DataFrame([[1, 2, 0.6], [2, 3, 0.4], [3, 4, 0.2], [4, 5, 0.7]])
result = df.rolling(2, method="table", min_periods=0).apply(
weighted_mean, raw=True, engine="numba"
)
expected = DataFrame(
[
[1.0, 2.0, 1.0],
[1.8, 2.0, 1.0],
[3.333333, 2.333333, 1.0],
[1.555556, 7, 1.0],
]
)
tm.assert_frame_equal(result, expected)
def test_table_method_expanding_apply(self, axis, nogil, parallel, nopython):
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
def f(x):
return np.sum(x, axis=0) + 1
df = DataFrame(np.eye(3))
result = df.expanding(method="table", axis=axis).apply(
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
expected = df.expanding(method="single", axis=axis).apply(
f, raw=True, engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
def test_table_method_expanding_methods(
self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators
):
method = arithmetic_numba_supported_operators
engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
df = DataFrame(np.eye(3))
result = getattr(df.expanding(method="table", axis=axis), method)(
engine_kwargs=engine_kwargs, engine="numba"
)
expected = getattr(df.expanding(method="single", axis=axis), method)(
engine_kwargs=engine_kwargs, engine="numba"
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
rfdougherty/dipy | scratch/very_scratch/spherical_statistics.py | 20 | 5695 | import numpy as np
import dipy.core.meshes as meshes
import get_vertices as gv
from dipy.core.triangle_subdivide import create_unit_sphere
#from dipy.viz import fos
#from dipy.io import dicomreaders as dcm
#import dipy.core.geometry as geometry
#import matplotlib.pyplot as mplp
import dipy.core.sphere_plots as splot
# set up a dictionary of sphere points that are in use EITHER as a set
# directions for diffusion weighted acquisitions OR as a set of
# evaluation points for an ODF (orientation distribution function.
sphere_dic = {'fy362': {'filepath' : '/home/ian/Devel/dipy/dipy/core/data/evenly_distributed_sphere_362.npz', 'object': 'npz', 'vertices': 'vertices', 'omit': 0, 'hemi': False},
'fy642': {'filepath' : '/home/ian/Devel/dipy/dipy/core/data/evenly_distributed_sphere_642.npz', 'object': 'npz', 'vertices': 'odf_vertices', 'omit': 0, 'hemi': False},
'siem64': {'filepath':'/home/ian/Devel/dipy/dipy/core/tests/data/small_64D.gradients.npy', 'object': 'npy', 'omit': 1, 'hemi': True},
'create2': {},
'create3': {},
'create4': {},
'create5': {},
'create6': {},
'create7': {},
'create8': {},
'create9': {},
'marta200': {'filepath': '/home/ian/Data/Spheres/200.npy', 'object': 'npy', 'omit': 0, 'hemi': True},
'dsi101': {'filepath': '/home/ian/Data/Frank_Eleftherios/frank/20100511_m030y_cbu100624/08_ep2d_advdiff_101dir_DSI', 'object': 'dicom', 'omit': 0, 'hemi': True}}
def plot_sphere(v,key):
r = fos.ren()
fos.add(r,fos.point(v,fos.green, point_radius= 0.01))
fos.show(r, title=key, size=(1000,1000))
def plot_lambert(v,key):
lamb = geometry.lambert_equal_area_projection_cart(*v.T).T
(y1,y2) = lamb
radius = np.sum(lamb**2,axis=0) < 1
#print inner
#print y1[inner]
#print y1[-inner]
figure = mplp.figure(facecolor='w')
current = figure.add_subplot(111)
current.patch.set_color('k')
current.plot(y1[radius],y2[radius],'.g')
current.plot(y1[-radius],y2[-radius],'.r')
current.axes.set_aspect(aspect = 'equal', adjustable = 'box')
figure.show()
figure.waitforbuttonpress()
mplp.close()
def get_vertex_set(key):
if key[:6] == 'create':
number = eval(key[6:])
vertices, edges, faces = create_unit_sphere(number)
omit = 0
else:
entry = sphere_dic[key]
#print entry
if entry.has_key('omit'):
omit = entry['omit']
else:
omit = 0
filepath = entry['filepath']
if entry['object'] == 'npz':
filearray = np.load(filepath)
vertices = filearray[entry['vertices']]
elif sphere_dic[key]['object'] == 'npy':
vertices = np.load(filepath)
elif entry['object'] == 'dicom':
data,affine,bvals,gradients=dcm.read_mosaic_dir(filepath)
#print (bvals.shape, gradients.shape)
grad3 = np.vstack((bvals,bvals,bvals)).transpose()
#print grad3.shape
#vertices = grad3*gradients
vertices = gradients
if omit > 0:
vertices = vertices[omit:,:]
if entry['hemi']:
vertices = np.vstack([vertices, -vertices])
print key, ': number of vertices = ', vertices.shape[0], '(drop ',omit,')'
return vertices[omit:,:]
xup=np.array([ 1,0,0])
xdn=np.array([-1,0,0])
yup=np.array([0, 1,0])
ydn=np.array([0,-1,0])
zup=np.array([0,0, 1])
zdn=np.array([0,0,-1])
#for key in sphere_dic:
#for key in ['siem64']:
for key in ['fy642']:
v = gv.get_vertex_set(key)
splot.plot_sphere(v,key)
splot.plot_lambert(v,key,centre=np.array([0.,0.]))
equat, polar = meshes.spherical_statistics(v,north=xup,width=0.2)
l = 2.*len(v)
equat = equat/l
polar = polar/l
print '%6.3f %6.3f %6.3f %6.3f' % (equat.min(), equat.mean(), equat.max(), np.sqrt(equat.var()))
print '%6.3f %6.3f %6.3f %6.3f' % (polar.min(), polar.mean(), polar.max(), np.sqrt(polar.var()))
def spherical_statistics(vertices, north=np.array([0,0,1]), width=0.02):
'''
    Evaluate a spherical triangulation by looking at the variability of the
    number of points from 'vertices' that fall in equatorial bands of width
    'width' orthogonal to each point in 'vertices'
'''
equatorial_counts = np.array([len(equatorial_zone_vertices(vertices, pole, width=width)) for pole in vertices if np.dot(pole,north) >= 0])
#equatorial_counts = np.bincount(equatorial_counts)
#args = np.where(equatorial_counts>0)
#print zip(list(args[0]), equatorial_counts[args])
polar_counts = np.array([len(polar_zone_vertices(vertices, pole, width=width)) for pole in vertices if np.dot(pole,north) >= 0])
#unique_counts = np.sort(np.array(list(set(equatorial_counts))))
#polar_counts = np.bincount(polar_counts)
#counts_tokens = [(uc, bin_counts[uc]) for uc in bin_counts if ]
#args = np.where(polar_counts>0)
#print '(number, frequency):', zip(unique_counts,tokens)
#print '(number, frequency):', counts_tokens
#print zip(args, bin_counts[args])
#print zip(list(args[0]), polar_counts[args])
return equatorial_counts, polar_counts
def spherical_proportion(zone_width):
# assuming radius is 1: (2*np.pi*zone_width)/(4*np.pi)
# 0 <= zone_width <= 2
return zone_width/2.
def angle_for_zone(zone_width):
return np.arcsin(zone_width/2.)
def coarseness(faces):
    # NOTE: assumes a geometry module exposing circumradius() is available as `geom`
    faces = np.asarray(faces)
    coarse = 0.0
    for face in faces:
        a, b, c = face
        # np.max(a, b) would treat `b` as an axis; use builtin max for a running maximum
        coarse = max(coarse, geom.circumradius(a, b, c))
    return coarse
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
IndraVikas/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
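Examples
--------
A minimal sketch on hypothetical random data (the columns are normalized
by hand because this function assumes unit-norm columns; only the shape
of the returned coefficients is shown):

>>> import numpy as np
>>> from sklearn.linear_model import orthogonal_mp
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(20, 10)
>>> X /= np.sqrt((X ** 2).sum(axis=0))
>>> y = np.dot(X[:, :3], np.array([1.5, -2.0, 1.0]))
>>> coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
>>> coef.shape
(10,)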
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
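Examples
--------
A hedged sketch mirroring :func:`orthogonal_mp`, but working from the
precomputed Gram matrix ``X.T * X`` and covariance ``X.T * y`` (the data
below is hypothetical; only the coefficient shape is shown):

>>> import numpy as np
>>> from sklearn.linear_model import orthogonal_mp_gram
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(20, 10)
>>> X /= np.sqrt((X ** 2).sum(axis=0))
>>> y = np.dot(X[:, :3], np.array([1.5, -2.0, 1.0]))
>>> coef = orthogonal_mp_gram(np.dot(X.T, X), np.dot(X.T, y), n_nonzero_coefs=3)
>>> coef.shape
(10,)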
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in S. G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
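Examples
--------
A minimal usage sketch (the random design below is purely illustrative;
only the shape of the fitted coefficients is shown):

>>> import numpy as np
>>> from sklearn.linear_model import OrthogonalMatchingPursuit
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(20, 10)
>>> y = np.dot(X[:, :3], np.array([1.5, -2.0, 1.0]))
>>> omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3).fit(X, y)
>>> omp.coef_.shape
(10,)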
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform, and hence the maximum number of
features to include. 100 by default.
Returns
-------
residues : array, shape (max_features, n_samples)
Residues of the prediction on the test data, one row per step of the
forward path.
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum number of iterations to perform, and hence the maximum number of
features to include. Defaults to 10% of ``n_features``, but at least 5
if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 3-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
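Examples
--------
A hedged sketch of the cross-validated variant (hypothetical data; the
selected number of non-zero coefficients depends on the folds, so only
the shape of ``coef_`` is shown):

>>> import numpy as np
>>> from sklearn.linear_model import OrthogonalMatchingPursuitCV
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(100, 10)
>>> y = np.dot(X[:, :3], np.array([1.5, -2.0, 1.0]))
>>> reg = OrthogonalMatchingPursuitCV().fit(X, y)
>>> reg.coef_.shape
(10,)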
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
| bsd-3-clause |
henryzord/clustering | src/measures/dbcv/sequential.py | 1 | 2360 | import operator
import itertools
import warnings
from sklearn.metrics import pairwise_distances
import numpy as np
from __handler__ import Handler
class SequentialHandler(Handler):
def __init__(self, dataset):
super(SequentialHandler, self).__init__(dataset)
self._data_dm = pairwise_distances(dataset, metric='euclidean') ** 2.
self._data_coredist = np.empty(self._n_objects, dtype=np.float32)
def __get_coredist__(self):
"""
Calculate the all points core distance (a_pts_coredist) for each object.
:rtype: numpy.ndarray
:return: The core distance for each object in the dataset.
"""
# -- non-vectorized version, for understanding -- #
# for i in xrange(self._n_objects):
# index_neighbours = np.setdiff1d(np.flatnonzero(self.labels == self.labels[i]), [i])
# n_neighbours = index_neighbours.shape[0]
# dist_neighbours = self._data_dm[i, index_neighbours]
#
# _sum = 0.
# for neighbour in dist_neighbours:
# _sum += (1. / neighbour) ** float(self._n_attributes)
#
# _sum /= n_neighbours
# _sum **= -(1./self._n_attributes)
#
# self._data_coredist[i] = _sum
# -- vectorized counterpart, for speed -- #
warnings.filterwarnings(action='ignore')
vec_func = np.vectorize(
lambda i: (
(
1. / self._data_dm[i, np.setdiff1d(np.flatnonzero(self._data_labels == self._data_labels[i]), [i])]
) ** float(self._n_attributes)
).sum() / float(np.count_nonzero(self._data_labels == self._data_labels[i]) - 1)
)
self._data_coredist = np.power(vec_func(xrange(self._n_objects)), -(1./self._n_attributes))
warnings.filterwarnings(action='default')
def __mrd__(self):
_list = itertools.combinations_with_replacement(xrange(self._n_objects), r=2)
for x, y in _list:
self._data_mreach[x, y] = max([self._data_dm[x, y], self._data_coredist[x], self._data_coredist[y]])
self._data_mreach[y, x] = self._data_mreach[x, y]
# warnings.warn('filling diagonal with zeros!')
# np.fill_diagonal(self._data_mreach, 0.) # TODO testing!
return self._data_mreach
| gpl-3.0 |
mikebenfield/scikit-learn | sklearn/linear_model/tests/test_sag.py | 7 | 30673 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model.base import make_dataset
from sklearn.linear_model.logistic import _multinomial_loss_grad
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import row_norms
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris
from sklearn.base import clone
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True, saga=False):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
gradient_correction = update - gradient_memory[idx]
sum_gradient += gradient_correction
gradient_memory[idx] = update
if saga:
weights -= (gradient_correction *
step_size * (1 - 1. / len(seen)))
if fit_intercept:
gradient_correction = (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1. - 1. / len(seen))
if saga:
intercept -= (step_size * intercept_sum_gradient /
len(seen) * decay) + gradient_correction
else:
intercept -= (step_size * intercept_sum_gradient /
len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True, saga=False):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
gradient_correction = update - (gradient_memory[idx] * entry)
sum_gradient += gradient_correction
if saga:
for j in range(n_features):
weights[j] -= (gradient_correction[j] * step_size *
(1 - 1. / len(seen)) / wscale)
if fit_intercept:
gradient_correction = gradient - gradient_memory[idx]
intercept_sum_gradient += gradient_correction
gradient_correction *= step_size * (1. - 1. / len(seen))
if saga:
intercept -= ((step_size * intercept_sum_gradient /
len(seen) * decay) +
gradient_correction)
else:
intercept -= (step_size * intercept_sum_gradient /
len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1)) +
fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
for solver in ['sag', 'saga']:
if solver == 'sag':
n_iter = 80
else:
# SAGA variance w.r.t. stream order is higher
n_iter = 300
clf = LogisticRegression(solver=solver, fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept,
saga=solver == 'saga')
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept,
saga=solver == 'saga')
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=9)
assert_array_almost_equal(intercept, clf.intercept_, decimal=9)
assert_array_almost_equal(weights2, clf.coef_, decimal=9)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=9)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
# assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
# assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = row_norms(X, squared=True).max()
n_samples = X.shape[0]
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for saga in [True, False]:
for fit_intercept in (True, False):
if saga:
L_sqr = (max_squared_sum + alpha + int(fit_intercept))
L_log = (max_squared_sum + 4.0 * alpha +
int(fit_intercept)) / 4.0
mun_sqr = min(2 * n_samples * alpha, L_sqr)
mun_log = min(2 * n_samples * alpha, L_log)
step_size_sqr = 1 / (2 * L_sqr + mun_sqr)
step_size_log = 1 / (2 * L_log + mun_log)
else:
step_size_sqr = 1.0 / (max_squared_sum +
alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha,
"squared",
fit_intercept,
n_samples=n_samples,
is_saga=saga)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept,
n_samples=n_samples,
is_saga=saga)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
def test_multinomial_loss():
# test if the multinomial loss and gradient computations are consistent
X, y = iris.data, iris.target.astype(np.float64)
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
rng = check_random_state(42)
weights = rng.randn(n_features, n_classes)
intercept = rng.randn(n_classes)
sample_weights = rng.randn(n_samples)
np.abs(sample_weights, sample_weights)
# compute loss and gradient like in multinomial SAG
dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
intercept, n_samples,
n_features, n_classes)
# compute loss and gradient like in multinomial LogisticRegression
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
# comparison
assert_array_almost_equal(grad_1, grad_2)
assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
# n_samples, n_features, n_classes = 4, 2, 3
n_classes = 3
X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1., 0, -.2])
sample_weights = np.array([0.8, 1, 1, 0.8])
prediction = np.dot(X, weights) + intercept
logsumexp_prediction = logsumexp(prediction, axis=1)
p = prediction - logsumexp_prediction[:, np.newaxis]
loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
grad_1 = np.dot(X.T, diff)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
assert_almost_equal(loss_1, loss_2)
assert_array_almost_equal(grad_1, grad_2)
# ground truth
loss_gt = 11.680360354325961
grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
[-0.903942, +5.258745, -4.354803]])
assert_almost_equal(loss_1, loss_gt)
assert_array_almost_equal(grad_1, grad_gt)
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/tests/internals/test_internals.py | 10 | 46654 | # -*- coding: utf-8 -*-
# pylint: disable=W0102
from datetime import datetime, date
import sys
import pytest
import numpy as np
import re
from distutils.version import LooseVersion
import itertools
from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex,
Series, Categorical)
from pandas.compat import OrderedDict, lrange
from pandas.core.sparse.array import SparseArray
from pandas.core.internals import (BlockPlacement, SingleBlockManager,
make_block, BlockManager)
import pandas.core.algorithms as algos
import pandas.util.testing as tm
import pandas as pd
from pandas._libs import lib
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
randn, assert_series_equal)
from pandas.compat import zip, u
# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
PY361 = sys.version >= LooseVersion('3.6.1')
@pytest.fixture
def mgr():
return create_mgr(
'a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
assert left.dtype == right.dtype
assert isinstance(left.mgr_locs, lib.BlockPlacement)
assert isinstance(right.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array,
right.mgr_locs.as_array)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(
arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N, )
shape = (num_items, ) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('b', 'bool', ):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
# datetime with tz
m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category', ):
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
elif typestr in ('category2', ):
values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
])
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
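For instance, a hypothetical description following these rules (two
float64 columns sharing one block, one object column, and two int64
columns forced into separate blocks)::

create_mgr('a,b: f8; c: object; d: i8-1; e: i8-2')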
"""
if item_shape is None:
item_shape = (N, )
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
if not len(d):
continue
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr,
placement,
item_shape=item_shape,
num_offset=num_offset, ))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
class TestBlock(object):
def setup_method(self, method):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
assert int32block.dtype == np.int32
def test_pickle(self):
def _check(blk):
assert_block_equal(tm.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert isinstance(self.fblock.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
np.array([0, 2, 4], dtype=np.int64))
def test_attrs(self):
assert self.fblock.shape == self.fblock.values.shape
assert self.fblock.dtype == self.fblock.values.dtype
assert len(self.fblock) == len(self.fblock.values)
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
np.array([0, 1, 2, 3], dtype=np.int64))
tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
assert cop is not self.fblock
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert isinstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([2, 4], dtype=np.int64))
assert (newb.values[0] == 1).all()
newb = self.fblock.copy()
newb.delete(1)
assert isinstance(newb.mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 4], dtype=np.int64))
assert (newb.values[1] == 2).all()
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 2], dtype=np.int64))
assert (newb.values[1] == 1).all()
newb = self.fblock.copy()
with pytest.raises(Exception):
newb.delete(3)
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
pytest.skip("skipping for now")
bs = list(self.fblock.split_block_at('a'))
assert len(bs) == 1
assert np.array_equal(bs[0].items, ['c', 'e'])
bs = list(self.fblock.split_block_at('c'))
assert len(bs) == 2
assert np.array_equal(bs[0].items, ['a'])
assert np.array_equal(bs[1].items, ['e'])
bs = list(self.fblock.split_block_at('e'))
assert len(bs) == 1
assert np.array_equal(bs[0].items, ['a', 'c'])
# bblock = get_bool_ex(['f'])
# bs = list(bblock.split_block_at('f'))
# assert len(bs), 0)
class TestDatetimeBlock(object):
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[2]
assert pd.Timestamp(none_coerced) is pd.NaT
        # coerce different types of date objects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[2]
assert np.int64 == type(coerced)
assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
class TestBlockManager(object):
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
assert mgr.nblocks == 2
assert len(mgr) == 6
def test_is_mixed_dtype(self):
assert not create_mgr('a,b:f8').is_mixed_type
assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type
assert create_mgr('a,b:f8; c,d: f4').is_mixed_type
assert create_mgr('a,b:f8; c,d: object').is_mixed_type
def test_is_indexed_like(self):
mgr1 = create_mgr('a,b: f8')
mgr2 = create_mgr('a:i8; b:bool')
mgr3 = create_mgr('a,b,c: f8')
assert mgr1._is_indexed_like(mgr1)
assert mgr1._is_indexed_like(mgr2)
assert mgr1._is_indexed_like(mgr3)
assert not mgr1._is_indexed_like(mgr1.get_slice(
slice(-1), axis=1))
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
with pytest.raises(AssertionError):
BlockManager(blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self, mgr):
assert 'a' in mgr
assert 'baz' not in mgr
def test_pickle(self, mgr):
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
# share ref_items
# assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items
# GH2431
assert hasattr(mgr2, "_is_consolidated")
assert hasattr(mgr2, "_known_consolidated")
# reset to False on load
assert not mgr2._is_consolidated
assert not mgr2._known_consolidated
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
smgr2 = tm.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get_scalar(self, mgr):
for item in mgr.items:
for i, index in enumerate(mgr.axes[1]):
res = mgr.get_scalar((item, index))
exp = mgr.get(item, fastpath=False)[i]
assert res == exp
exp = mgr.get(item).internal_values()[i]
assert res == exp
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(), placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').internal_values(), values[0])
assert_almost_equal(mgr.get('b').internal_values(), values[1])
assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3, ))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
np.array([0] * 3))
tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
np.array(['bar'] * 3, dtype=np.object_))
tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
np.array([2] * 3))
tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
np.array(['foo'] * 3, dtype=np.object_))
def test_set_change_dtype(self, mgr):
mgr.set('baz', np.zeros(N, dtype=bool))
mgr.set('baz', np.repeat('foo', N))
assert mgr.get('baz').dtype == np.object_
mgr2 = mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
assert mgr2.get('baz').dtype == np.object_
mgr2.set('quux', randn(N).astype(int))
assert mgr2.get('quux').dtype == np.int_
mgr2.set('quux', randn(N))
assert mgr2.get('quux').dtype == np.float_
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
assert sorted(df.blocks.keys()) == ['float64', 'int64']
assert_frame_equal(df.blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(df.blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
def test_copy(self, mgr):
cp = mgr.copy(deep=False)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
assert cp_blk.equals(blk)
assert cp_blk.values.base is blk.values.base
cp = mgr.copy(deep=True)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
assert cp_blk.equals(blk)
if cp_blk.values.base is not None and blk.values.base is not None:
assert cp_blk.values.base is not blk.values.base
else:
assert cp_blk.values.base is None and blk.values.base is None
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
assert mgr.as_matrix().dtype == np.float64
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
assert len(mgr.blocks) == 3
assert isinstance(mgr, BlockManager)
# what to test here?
def test_as_matrix_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
assert mgr.as_matrix().dtype == np.float64
mgr = create_mgr('c: f4; d: f2')
assert mgr.as_matrix().dtype == np.float32
def test_as_matrix_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
assert mgr.as_matrix().dtype == np.bool_
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
assert mgr.as_matrix().dtype == np.int64
mgr = create_mgr('c: i4; d: i2; e: u1')
assert mgr.as_matrix().dtype == np.int32
def test_as_matrix_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
assert mgr.as_matrix().dtype == 'M8[ns]'
def test_as_matrix_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'
assert mgr.get('g').dtype == 'datetime64[ns, CET]'
assert mgr.as_matrix().dtype == 'object'
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
assert tmgr.get('c').dtype.type == t
assert tmgr.get('d').dtype.type == t
assert tmgr.get('e').dtype.type == t
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, errors='ignore')
assert tmgr.get('c').dtype.type == t
assert tmgr.get('e').dtype.type == t
assert tmgr.get('f').dtype.type == t
assert tmgr.get('g').dtype.type == t
assert tmgr.get('a').dtype.type == np.object_
assert tmgr.get('b').dtype.type == np.object_
if t != np.int64:
assert tmgr.get('d').dtype.type == np.datetime64
else:
assert tmgr.get('d').dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
assert len(old_blocks) == len(new_blocks)
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
assert found
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
assert found
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get('a').dtype == np.int64
assert new_mgr.get('b').dtype == np.float64
assert new_mgr.get('foo').dtype == np.object_
assert new_mgr.get('f').dtype == np.int64
assert new_mgr.get('g').dtype == np.float64
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get('a').dtype == np.int64
assert new_mgr.get('b').dtype == np.float64
assert new_mgr.get('foo').dtype == np.object_
assert new_mgr.get('f').dtype == np.int32
assert new_mgr.get('bool').dtype == np.bool_
        assert new_mgr.get('dt').dtype.type == np.datetime64
assert new_mgr.get('i').dtype == np.int64
assert new_mgr.get('g').dtype == np.float64
assert new_mgr.get('h').dtype == np.float16
def test_interleave(self):
# self
for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
'm8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
assert mgr.as_matrix().dtype == dtype
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
assert mgr.as_matrix().dtype == dtype
# will be converted according the actual dtype of the underlying
mgr = create_mgr('a: category')
assert mgr.as_matrix().dtype == 'i8'
mgr = create_mgr('a: category; b: category')
assert mgr.as_matrix().dtype == 'i8'
mgr = create_mgr('a: category; b: category2')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: category2')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: category2; b: category2')
assert mgr.as_matrix().dtype == 'object'
# combinations
mgr = create_mgr('a: f8')
assert mgr.as_matrix().dtype == 'f8'
mgr = create_mgr('a: f8; b: i8')
assert mgr.as_matrix().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8')
assert mgr.as_matrix().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8; d: object')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: bool; b: i8')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: complex')
assert mgr.as_matrix().dtype == 'complex'
mgr = create_mgr('a: f8; b: category')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: category')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: bool')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: i8')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: bool')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: i8')
assert mgr.as_matrix().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
assert mgr.as_matrix().dtype == 'object'
def test_interleave_non_unique_cols(self):
df = DataFrame([
[pd.Timestamp('20130101'), 3.5],
[pd.Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
assert df_unique.values.shape == df.values.shape
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self, mgr):
mgr.set('f', randn(N))
mgr.set('d', randn(N))
mgr.set('b', randn(N))
mgr.set('g', randn(N))
mgr.set('h', randn(N))
# we have datetime/tz blocks in mgr
cons = mgr.consolidate()
assert cons.nblocks == 4
cons = mgr.consolidate().get_numeric_data()
assert cons.nblocks == 1
assert isinstance(cons.blocks[0].mgr_locs, lib.BlockPlacement)
tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
np.arange(len(cons.items), dtype=np.int64))
def test_reindex_index(self):
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
assert reindexed.nblocks == 2
tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
assert_almost_equal(
mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
assert_almost_equal(
mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
assert_almost_equal(
mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
assert_almost_equal(
mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
assert_almost_equal(
mgr.get('g').internal_values(),
reindexed.get('g').internal_values())
assert_almost_equal(
mgr.get('c').internal_values(),
reindexed.get('c').internal_values())
assert_almost_equal(
mgr.get('a').internal_values(),
reindexed.get('a').internal_values())
assert_almost_equal(
mgr.get('d').internal_values(),
reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
assert result.shape == (6, 2)
assert result.axes[1][0] == ('bar', 'one')
assert result.axes[1][1] == ('bar', 'two')
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
assert_almost_equal(
mgr.get('float', fastpath=False), numeric.get('float',
fastpath=False))
assert_almost_equal(
mgr.get('float').internal_values(),
numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, pd.Index(['bool']))
assert_almost_equal(mgr.get('bool', fastpath=False),
bools.get('bool', fastpath=False))
assert_almost_equal(
mgr.get('bool').internal_values(),
bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr(u('b,\u05d0: object')))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.loc[:, u("\u05d0")] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
assert bm.equals(bm_this)
assert bm_this.equals(bm)
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
assert mgr.as_matrix().tolist() == [0., 1., 2., 3., 4.]
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
for value in invalid_values:
with pytest.raises(ValueError):
bm1.replace_list([1], [2], inplace=value)
class TestIndexing(object):
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
# NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
# and are disabled.
MANAGERS = [
create_single_mgr('f8', N),
create_single_mgr('i8', N),
# create_single_mgr('sparse', N),
create_single_mgr('sparse_na', N),
# 2-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
# create_mgr('a: sparse', item_shape=(N,)),
create_mgr('a: sparse_na', item_shape=(N,)),
# 3-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
# create_mgr('a: sparse', item_shape=(1, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
# import pudb; pudb.set_trace()
mat = mgr.as_matrix()
            # we may be using an ndarray to test slicing and it
            # might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate([slobj, np.zeros(
len(ax) - len(slobj), dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None), ) * axis + (slobj, )
tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_matrix(),
check_dtype=False)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
assert_slice_ok(mgr, ax, slice(None))
assert_slice_ok(mgr, ax, slice(3))
assert_slice_ok(mgr, ax, slice(100))
assert_slice_ok(mgr, ax, slice(1, 4))
assert_slice_ok(mgr, ax, slice(3, 0, -2))
# boolean mask
assert_slice_ok(
mgr, ax, np.array([], dtype=np.bool_))
assert_slice_ok(
mgr, ax,
np.ones(mgr.shape[ax], dtype=np.bool_))
assert_slice_ok(
mgr, ax,
np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
assert_slice_ok(
mgr, ax,
np.arange(mgr.shape[ax]) % 3 == 0)
assert_slice_ok(
mgr, ax, np.array(
[True, True, False], dtype=np.bool_))
# fancy indexer
assert_slice_ok(mgr, ax, [])
assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
assert_slice_ok(mgr, ax, [0, 1, 2])
assert_slice_ok(mgr, ax, [-1, -2, -3])
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_matrix()
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
taken.as_matrix(), check_dtype=False)
tm.assert_index_equal(mgr.axes[axis].take(indexer),
taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
assert_take_ok(mgr, ax, [])
assert_take_ok(mgr, ax, [0, 0, 0])
assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
assert_take_ok(mgr, ax, [0, 1, 2])
assert_take_ok(mgr, ax, [-1, -2, -3])
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
mat = mgr.as_matrix()
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
fill_value=fill_value),
reindexed.as_matrix(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
assert_reindex_axis_is_ok(
mgr, ax,
pd.Index([]), fill_value)
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax],
fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][[0, 0, 0]], fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']), fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
fill_value)
if mgr.shape[ax] >= 3:
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][:-3], fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][-3::-1], fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
def test_reindex_indexer(self):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
fill_value):
mat = mgr.as_matrix()
reindexed_mat = algos.take_nd(mat, indexer, axis,
fill_value=fill_value)
reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(reindexed_mat,
reindexed.as_matrix(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index([]), [], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo'] * mgr.shape[ax]),
np.arange(mgr.shape[ax]), fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
fill_value)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax],
np.arange(mgr.shape[ax])[::-1], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 0, 0], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[-1, 0, -1], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
[-1, -1, -1], fill_value)
if mgr.shape[ax] >= 3:
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement(object):
def test_slice_len(self):
assert len(BlockPlacement(slice(0, 4))) == 4
assert len(BlockPlacement(slice(0, 4, 2))) == 2
assert len(BlockPlacement(slice(0, 3, 2))) == 2
assert len(BlockPlacement(slice(0, 1, 2))) == 1
assert len(BlockPlacement(slice(1, 0, -1))) == 1
def test_zero_step_raises(self):
with pytest.raises(ValueError):
BlockPlacement(slice(1, 1, 0))
with pytest.raises(ValueError):
BlockPlacement(slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
tm.assert_raises_regex(ValueError, "unbounded slice",
lambda: BlockPlacement(slc))
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
assert_unbounded_slice_error(slice(None, None, -1))
assert_unbounded_slice_error(slice(None, 10, -1))
# These are "unbounded" because negative index will change depending on
# container shape.
assert_unbounded_slice_error(slice(-1, None))
assert_unbounded_slice_error(slice(None, -1))
assert_unbounded_slice_error(slice(-1, -1))
assert_unbounded_slice_error(slice(-1, None, -1))
assert_unbounded_slice_error(slice(None, -1, -1))
assert_unbounded_slice_error(slice(-1, -1, -1))
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
assert not BlockPlacement(slc).is_slice_like
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
assert not BlockPlacement(slice(0, 0)).is_slice_like
assert not BlockPlacement(slice(100, 100)).is_slice_like
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
assert BlockPlacement(arr).as_slice == slc
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
if not PY361:
assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
assert not BlockPlacement(arr).is_slice_like
assert_not_slice_like([])
assert_not_slice_like([-1])
assert_not_slice_like([-1, -2, -3])
assert_not_slice_like([-10])
assert_not_slice_like([-1])
assert_not_slice_like([-1, 0, 1, 2])
assert_not_slice_like([-2, 0, 2, 4])
assert_not_slice_like([1, 0, -1])
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
assert list(BlockPlacement(slice(0, 0))) == []
assert list(BlockPlacement(slice(3, 0))) == []
if not PY361:
assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray, dtype=np.int64))
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
if not PY361:
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
assert bpl.add(1).as_slice == slice(1, 6, 1)
assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
assert list(BlockPlacement(val).add(inc)) == result
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
with pytest.raises(ValueError):
BlockPlacement(slice(1, 4)).add(-10)
with pytest.raises(ValueError):
BlockPlacement([1, 2, 4]).add(-10)
if not PY361:
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
with pytest.raises(ValueError):
BlockPlacement(slice(2, None, -1)).add(-1)
| gpl-2.0 |
pauljxtan/mathsci | examples/integrate_nonlinear_pendulum.py | 2 | 1050 | #!/usr/bin/env python
"""
Integrating the nonlinear pendulum.
"""
import math
from matplotlib import pyplot
from souffle.physics import mechanics
from souffle.math import odeint
def main():
# Set the integration parameters
f = mechanics.nonlinear_pendulum
dt = 0.01
t0 = 0.0
X0 = [0.99*math.pi, 0.0]
# Set constants
l = 0.1
# Integrate it...
# Using non-adaptive RK4:
#nonlinpend = odeint.RK4(f, t0, X0, l=l)
#nonlinpend.integrate(dt, 2500, True)
# Using adaptive RK4:
nonlinpend = odeint.RK4Adaptive(f, t0, X0, l=l)
duration = 25.0
dt0 = 0.05
delta = 1e-3
indices = [0, 1]
nonlinpend.integrate(duration, dt0, delta, indices, True)
# Unpack data
t = nonlinpend.t
theta, omega = nonlinpend.unpack()
# Plot it
fig1 = pyplot.figure()
fig1_sp1 = fig1.add_subplot(111)
fig1_sp1.plot(t, theta, "x-")
fig1_sp1.set_xlabel("Time [s]")
fig1_sp1.set_ylabel("Angle from vertical [rad]")
pyplot.show()
if __name__ == "__main__":
main()
| mit |
ChinaQuants/zipline | zipline/utils/test_utils.py | 1 | 11380 | from contextlib import contextmanager
from itertools import (
product,
)
import operator
import os
import shutil
from string import ascii_uppercase
import tempfile
from logbook import FileHandler
from mock import patch
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from six import (
itervalues,
)
from six.moves import filter
from sqlalchemy import create_engine
from zipline.assets import AssetFinder
from zipline.assets.asset_writer import AssetDBWriterFromDataFrame
from zipline.finance.blotter import ORDER_STATUS
from zipline.utils import security_list
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit='s', tz='UTC')
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return int((pd.Timestamp(s, tz='UTC') - EPOCH).total_seconds())
def setup_logger(test, path='test.log'):
test.log_handler = FileHandler(path)
test.log_handler.push_application()
def teardown_logger(test):
test.log_handler.pop_application()
test.log_handler.close()
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
class ExceptionSource(object):
def __init__(self):
pass
def get_hash(self):
return "ExceptionSource"
def __iter__(self):
return self
def next(self):
5 / 0
def __next__(self):
5 / 0
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(os.path.join(old_dir, subdir),
os.path.join(new_dir, subdir))
with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \
patch.object(security_list, 'using_copy', True,
create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, 'using_copy'):
raise Exception('add_security_data must be used within '
'security_list_copy context')
directory = os.path.join(
security_list.SECURITY_LISTS_DIR,
"leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, 'w') as f:
for sym in deletes:
f.write(sym)
f.write('\n')
add_path = os.path.join(directory, "add")
with open(add_path, 'w') as f:
for sym in adds:
f.write(sym)
f.write('\n')
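# Hypothetical usage sketch (the symbols below are made up); as the check
# above enforces, `add_security_data` must run inside `security_list_copy`:
#     with security_list_copy():
#         add_security_data(adds=['SYMA'], deletes=['SYMB'])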
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.utils.test_utils import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
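# Illustrative example: list(product_upper_triangle(range(3))) gives
# [(0, 1), (0, 2), (1, 2)]; with include_diagonal=True the pairs
# (0, 0), (1, 1) and (2, 2) appear as well.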
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
)
def make_rotating_asset_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts`
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'sid': range(num_assets),
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'asset_type': ['equity'] * num_assets,
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': 'TEST',
}
)
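# Illustrative reading of the parameters above: with periods_between_starts=2
# and asset_lifetime=5, asset i starts 2 * i * `frequency` after `first_start`
# and exists for 5 * `frequency` after its own start date.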
def make_simple_asset_info(assets, start_date, end_date, symbols=None):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
assets : array-like
start_date : pd.Timestamp
end_date : pd.Timestamp
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(assets)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
return pd.DataFrame(
{
'sid': assets,
'symbol': symbols,
'asset_type': ['equity'] * num_assets,
'start_date': [start_date] * num_assets,
'end_date': [end_date] * num_assets,
'exchange': 'TEST',
}
)
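# Illustrative example: make_simple_asset_info([1, 2], pd.Timestamp('2014'),
# pd.Timestamp('2015')) returns two rows with generated symbols 'A' and 'B',
# both alive over the full 2014-2015 span.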
def check_allclose(actual,
desired,
rtol=1e-07,
atol=0,
err_msg='',
verbose=True):
"""
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
np.assert_allclose
"""
if type(actual) != type(desired):
raise AssertionError("%s != %s" % (type(actual), type(desired)))
    return assert_allclose(actual, desired, rtol=rtol, atol=atol,
                           err_msg=err_msg, verbose=verbose)
def check_arrays(x, y, err_msg='', verbose=True):
"""
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
np.assert_array_equal
"""
if type(x) != type(y):
raise AssertionError("%s != %s" % (type(x), type(y)))
    return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
pass
class ExplodingObject(object):
"""
Object that will raise an exception on any attribute access.
Useful for verifying that an object is never touched during a
function/method call.
"""
def __getattribute__(self, name):
raise UnexpectedAttributeAccess(name)
class tmp_assets_db(object):
"""Create a temporary assets sqlite database.
This is meant to be used as a context manager.
    Parameters
----------
data : pd.DataFrame, optional
The data to feed to the writer. By default this maps:
('A', 'B', 'C') -> map(ord, 'ABC')
"""
def __init__(self, data=None):
self._eng = None
self._data = AssetDBWriterFromDataFrame(
data if data is not None else make_simple_asset_info(
list(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
)
)
def __enter__(self):
self._eng = eng = create_engine('sqlite://')
self._data.write_all(eng)
return eng
def __exit__(self, *excinfo):
assert self._eng is not None, '_eng was not set in __enter__'
self._eng.dispose()
class tmp_asset_finder(tmp_assets_db):
"""Create a temporary asset finder using an in memory sqlite db.
    Parameters
----------
data : dict, optional
The data to feed to the writer
"""
def __enter__(self):
return AssetFinder(super(tmp_asset_finder, self).__enter__())
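# Hypothetical usage sketch (assumes the finder exposes the usual
# `retrieve_asset` lookup; sids come from the default ``map(ord, 'ABC')``):
#     with tmp_asset_finder() as finder:
#         asset_a = finder.retrieve_asset(ord('A'))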
| apache-2.0 |
kambysese/mne-python | mne/viz/backends/_abstract.py | 3 | 24285 | """ABCs."""
# Authors: Guillaume Favelier <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import warnings
from abc import ABC, abstractmethod, abstractclassmethod
from ..utils import tight_layout
from ...fixes import nullcontext
class _AbstractRenderer(ABC):
@abstractclassmethod
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name=None, show=False, shape=(1, 1)):
"""Set up the scene."""
pass
@abstractclassmethod
def subplot(self, x, y):
"""Set the active subplot."""
pass
@abstractclassmethod
def scene(self):
"""Return scene handle."""
pass
@abstractclassmethod
def set_interaction(self, interaction):
"""Set interaction mode."""
pass
@abstractclassmethod
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None,
polygon_offset=None, **kwargs):
"""Add a mesh in the scene.
Parameters
----------
x : array, shape (n_vertices,)
The array containing the X component of the vertices.
y : array, shape (n_vertices,)
The array containing the Y component of the vertices.
z : array, shape (n_vertices,)
The array containing the Z component of the vertices.
triangles : array, shape (n_polygons, 3)
The array containing the indices of the polygons.
color : tuple | str
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the mesh.
shading : bool
If True, enable the mesh shading.
backface_culling : bool
If True, enable backface culling on the mesh.
scalars : ndarray, shape (n_vertices,)
            The scalar values associated with the vertices.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
interpolate_before_map :
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
            result in showing colors that are not present in the color map.
representation : str
The representation of the mesh: either 'surface' or 'wireframe'.
line_width : int
The width of the lines when representation='wireframe'.
normals : array, shape (n_vertices, 3)
The array containing the normal of each vertex.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
kwargs : args
The arguments to pass to triangular_mesh
Returns
-------
surface :
Handle of the mesh in the scene.
"""
pass
@abstractclassmethod
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
"""Add a contour in the scene.
Parameters
----------
surface : surface object
The mesh to use as support for contour.
scalars : ndarray, shape (n_vertices,)
            The scalar values associated with the vertices.
contours : int | list
Specifying a list of values will only give the requested contours.
width : float
The width of the lines or radius of the tubes.
opacity : float
The opacity of the contour.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
normalized_colormap : bool
Specify if the values of the colormap are between 0 and 1.
kind : 'line' | 'tube'
The type of the primitives to use to display the contours.
color :
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False, polygon_offset=None):
"""Add a surface in the scene.
Parameters
----------
surface : surface object
The information describing the surface.
color : tuple | str
The color of the surface as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the surface.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
scalars : ndarray, shape (n_vertices,)
            The scalar values associated with the vertices.
backface_culling : bool
If True, enable backface culling on the surface.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
"""
pass
@abstractclassmethod
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
"""Add sphere in the scene.
Parameters
----------
center : ndarray, shape(n_center, 3)
The list of centers to use for the sphere(s).
color : tuple | str
The color of the sphere as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the spheres. The given value specifies
the maximum size in drawing units.
opacity : float
The opacity of the sphere(s).
resolution : int
The resolution of the sphere created. This is the number
of divisions along theta and phi.
backface_culling : bool
If True, enable backface culling on the sphere(s).
radius : float | None
Replace the glyph scaling by a fixed radius value for each
sphere (not supported by mayavi).
"""
pass
@abstractclassmethod
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
"""Add tube in the scene.
Parameters
----------
origin : array, shape(n_lines, 3)
The coordinates of the first end of the tube(s).
destination : array, shape(n_lines, 3)
The coordinates of the other end of the tube(s).
radius : float
The radius of the tube(s).
color : tuple | str
The color of the tube as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scalars : array, shape (n_quivers,) | None
The optional scalar data to use.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
opacity : float
The opacity of the tube(s).
backface_culling : bool
If True, enable backface culling on the tube(s).
reverse_lut : bool
If True, reverse the lookup table.
Returns
-------
surface :
Handle of the tube in the scene.
"""
pass
@abstractclassmethod
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, colormap=None, vmin=None, vmax=None,
line_width=2., name=None):
"""Add quiver3d in the scene.
Parameters
----------
x : array, shape (n_quivers,)
The X component of the position of the quiver.
y : array, shape (n_quivers,)
The Y component of the position of the quiver.
z : array, shape (n_quivers,)
The Z component of the position of the quiver.
u : array, shape (n_quivers,)
The last X component of the quiver.
v : array, shape (n_quivers,)
The last Y component of the quiver.
w : array, shape (n_quivers,)
The last Z component of the quiver.
color : tuple | str
The color of the quiver as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the glyphs. The size of the glyph
is by default calculated from the inter-glyph spacing.
The given value specifies the maximum glyph size in drawing units.
mode : 'arrow', 'cone' or 'cylinder'
The type of the quiver.
resolution : int
The resolution of the glyph created. Depending on the type of
glyph, it represents the number of divisions in its geometric
representation.
glyph_height : float
The height of the glyph used with the quiver.
glyph_center : tuple
The center of the glyph used with the quiver: (x, y, z).
glyph_resolution : float
The resolution of the glyph used with the quiver.
opacity : float
The opacity of the quiver.
scale_mode : 'vector', 'scalar' or 'none'
The scaling mode for the glyph.
scalars : array, shape (n_quivers,) | None
The optional scalar data to use.
backface_culling : bool
If True, enable backface culling on the quiver.
colormap :
The colormap to use.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
line_width : float
The width of the 2d arrows.
"""
pass
@abstractclassmethod
def text2d(self, x_window, y_window, text, size=14, color='white'):
"""Add 2d text in the scene.
Parameters
----------
x : float
The X component to use as position of the text in the
window coordinates system (window_width, window_height).
y : float
The Y component to use as position of the text in the
window coordinates system (window_width, window_height).
text : str
The content of the text.
size : int
The size of the font.
color : tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def text3d(self, x, y, z, text, width, color='white'):
"""Add 2d text in the scene.
Parameters
----------
x : float
The X component to use as position of the text.
y : float
The Y component to use as position of the text.
z : float
The Z component to use as position of the text.
text : str
The content of the text.
width : float
The width of the text.
color : tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None):
"""Add a scalar bar in the scene.
Parameters
----------
source :
The object of the scene used for the colormap.
color :
The color of the label text.
title : str | None
The title of the scalar bar.
n_labels : int | None
The number of labels to display on the scalar bar.
bgcolor :
The color of the background when there is transparency.
"""
pass
@abstractclassmethod
def show(self):
"""Render the scene."""
pass
@abstractclassmethod
def close(self):
"""Close the scene."""
pass
@abstractclassmethod
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None, roll=None, reset_camera=True):
"""Configure the camera of the scene.
Parameters
----------
azimuth : float
The azimuthal angle of the camera.
elevation : float
The zenith angle of the camera.
distance : float
The distance to the focal point.
focalpoint : tuple
The focal point of the camera: (x, y, z).
roll : float
The rotation of the camera along its axis.
reset_camera : bool
If True, reset the camera properties beforehand.
"""
pass
@abstractclassmethod
def reset_camera(self):
"""Reset the camera properties."""
pass
@abstractclassmethod
def screenshot(self, mode='rgb', filename=None):
"""Take a screenshot of the scene.
Parameters
----------
mode : str
Either 'rgb' or 'rgba' for values to return.
Default is 'rgb'.
filename : str | None
If not None, save the figure to the disk.
"""
pass
@abstractclassmethod
def project(self, xyz, ch_names):
"""Convert 3d points to a 2d perspective.
Parameters
----------
xyz : array, shape(n_points, 3)
The points to project.
ch_names : array, shape(_n_points,)
Names of the channels.
"""
pass
@abstractclassmethod
def enable_depth_peeling(self):
"""Enable depth peeling."""
pass
@abstractclassmethod
def remove_mesh(self, mesh_data):
"""Remove the given mesh from the scene.
Parameters
----------
mesh_data : tuple | Surface
The mesh to remove.
"""
pass
class _AbstractToolBar(ABC):
@abstractmethod
def _tool_bar_load_icons(self):
pass
@abstractmethod
def _tool_bar_initialize(self, name="default", window=None):
pass
@abstractmethod
def _tool_bar_add_button(self, name, desc, func, icon_name=None,
shortcut=None):
pass
@abstractmethod
def _tool_bar_update_button_icon(self, name, icon_name):
pass
@abstractmethod
def _tool_bar_add_text(self, name, value, placeholder):
pass
@abstractmethod
def _tool_bar_add_spacer(self):
pass
@abstractmethod
def _tool_bar_add_screenshot_button(self, name, desc, func):
pass
@abstractmethod
def _tool_bar_set_theme(self, theme):
pass
class _AbstractDock(ABC):
@abstractmethod
def _dock_initialize(self, window=None):
pass
@abstractmethod
def _dock_finalize(self):
pass
@abstractmethod
def _dock_show(self):
pass
@abstractmethod
def _dock_hide(self):
pass
@abstractmethod
def _dock_add_stretch(self, layout):
pass
@abstractmethod
def _dock_add_layout(self, vertical=True):
pass
@abstractmethod
def _dock_add_label(self, value, align=False, layout=None):
pass
@abstractmethod
def _dock_add_button(self, name, callback, layout=None):
pass
@abstractmethod
def _dock_named_layout(self, name, layout, compact):
pass
@abstractmethod
def _dock_add_slider(self, name, value, rng, callback,
compact=True, double=False, layout=None):
pass
@abstractmethod
def _dock_add_spin_box(self, name, value, rng, callback,
compact=True, double=True, layout=None):
pass
@abstractmethod
def _dock_add_combo_box(self, name, value, rng,
callback, compact=True, layout=None):
pass
@abstractmethod
def _dock_add_group_box(self, name, layout=None):
pass
class _AbstractMenuBar(ABC):
@abstractmethod
def _menu_initialize(self, window=None):
pass
@abstractmethod
def _menu_add_submenu(self, name, desc):
pass
@abstractmethod
def _menu_add_button(self, menu_name, name, desc, func):
pass
class _AbstractStatusBar(ABC):
@abstractmethod
def _status_bar_initialize(self, window=None):
pass
@abstractmethod
def _status_bar_add_label(self, value, stretch=0):
pass
@abstractmethod
def _status_bar_add_progress_bar(self, stretch=0):
pass
class _AbstractPlayback(ABC):
@abstractmethod
def _playback_initialize(self, func, timeout):
pass
class _AbstractLayout(ABC):
@abstractmethod
def _layout_initialize(self, max_width):
pass
@abstractmethod
def _layout_add_widget(self, layout, widget):
pass
class _AbstractWidget(ABC):
def __init__(self, widget):
self._widget = widget
@property
def widget(self):
return self._widget
@abstractmethod
def set_value(self, value):
pass
@abstractmethod
def get_value(self):
pass
class _AbstractMplInterface(ABC):
@abstractmethod
def _mpl_initialize():
pass
class _AbstractMplCanvas(ABC):
def __init__(self, width, height, dpi):
"""Initialize the MplCanvas."""
from matplotlib import rc_context
from matplotlib.figure import Figure
# prefer constrained layout here but live with tight_layout otherwise
context = nullcontext
self._extra_events = ('resize',)
try:
context = rc_context({'figure.constrained_layout.use': True})
self._extra_events = ()
except KeyError:
pass
with context:
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)')
self.manager = None
def _connect(self):
for event in ('button_press', 'motion_notify') + self._extra_events:
self.canvas.mpl_connect(
event + '_event', getattr(self, 'on_' + event))
def plot(self, x, y, label, **kwargs):
"""Plot a curve."""
line, = self.axes.plot(
x, y, label=label, **kwargs)
self.update_plot()
return line
def plot_time_line(self, x, label, **kwargs):
"""Plot the vertical line."""
line = self.axes.axvline(x, label=label, **kwargs)
self.update_plot()
return line
def update_plot(self):
"""Update the plot."""
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', 'constrained_layout')
self.canvas.draw()
def set_color(self, bg_color, fg_color):
"""Set the widget colors."""
self.axes.set_facecolor(bg_color)
self.axes.xaxis.label.set_color(fg_color)
self.axes.yaxis.label.set_color(fg_color)
self.axes.spines['top'].set_color(fg_color)
self.axes.spines['bottom'].set_color(fg_color)
self.axes.spines['left'].set_color(fg_color)
self.axes.spines['right'].set_color(fg_color)
self.axes.tick_params(axis='x', colors=fg_color)
self.axes.tick_params(axis='y', colors=fg_color)
self.fig.patch.set_facecolor(bg_color)
def show(self):
"""Show the canvas."""
if self.manager is None:
self.canvas.show()
else:
self.manager.show()
def close(self):
"""Close the canvas."""
self.canvas.close()
def clear(self):
"""Clear internal variables."""
self.close()
self.axes.clear()
self.fig.clear()
self.canvas = None
self.manager = None
def on_resize(self, event):
"""Handle resize events."""
tight_layout(fig=self.axes.figure)
class _AbstractBrainMplCanvas(_AbstractMplCanvas):
def __init__(self, brain, width, height, dpi):
"""Initialize the MplCanvas."""
super().__init__(width, height, dpi)
self.brain = brain
self.time_func = brain.callbacks["time"]
def update_plot(self):
"""Update the plot."""
leg = self.axes.legend(
prop={'family': 'monospace', 'size': 'small'},
framealpha=0.5, handlelength=1.,
facecolor=self.brain._bg_color)
for text in leg.get_texts():
text.set_color(self.brain._fg_color)
super().update_plot()
def on_button_press(self, event):
"""Handle button presses."""
# left click (and maybe drag) in progress in axes
if (event.inaxes != self.axes or
event.button != 1):
return
self.time_func(
event.xdata, update_widget=True, time_as_index=False)
on_motion_notify = on_button_press # for now they can be the same
def clear(self):
"""Clear internal variables."""
super().clear()
self.brain = None
class _AbstractWindow(ABC):
def _window_initialize(self):
self._window = None
self._interactor = None
self._mplcanvas = None
self._show_traces = None
self._separate_canvas = None
self._interactor_fraction = None
@abstractmethod
def _window_close_connect(self, func):
pass
@abstractmethod
def _window_get_dpi(self):
pass
@abstractmethod
def _window_get_size(self):
pass
def _window_get_mplcanvas_size(self, fraction):
ratio = (1 - fraction) / fraction
dpi = self._window_get_dpi()
w, h = self._window_get_size()
h /= ratio
return (w / dpi, h / dpi)
@abstractmethod
def _window_get_simple_canvas(self, width, height, dpi):
pass
@abstractmethod
def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
separate_canvas):
pass
@abstractmethod
def _window_adjust_mplcanvas_layout(self):
pass
@abstractmethod
def _window_get_cursor(self):
pass
@abstractmethod
def _window_set_cursor(self, cursor):
pass
@abstractmethod
def _window_ensure_minimum_sizes(self):
pass
@abstractmethod
def _window_set_theme(self, theme):
pass
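# Illustrative sketch, not part of the original backend file: the abstract
# classes above define the contract a concrete GUI backend must implement.
# The minimal widget below only demonstrates the `_AbstractWidget` interface;
# its name and the dict-based storage are assumptions made for illustration.
class _InMemoryWidget(_AbstractWidget):
    """Toy widget storing its value in a plain dict (for illustration only)."""
    def __init__(self, value=None):
        super().__init__(widget={"value": value})
    def set_value(self, value):
        self._widget["value"] = value
    def get_value(self):
        return self._widget["value"]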
| bsd-3-clause |
chrinide/hep_ml | hep_ml/uboost.py | 4 | 24323 | """
The module contains an implementation of uBoost algorithm.
The main goal of **uBoost** is to fight correlation between predictions and some variables (i.e. mass of particle).
* `uBoostBDT` is a modified version of AdaBoost that aims to obtain uniform efficiency at a specified level (global efficiency)
* `uBoostClassifier` is a combination of uBoostBDTs for different efficiencies
This implementation is more advanced than the one described in the original paper:
it adds smoothing, trains classifiers in threads, has `learning_rate` and `uniforming_rate` parameters,
does automatic weight renormalization and supports the SAMME.R modification to use predicted probabilities.
Only binary classification is implemented.
See also: :class:`hep_ml.losses.BinFlatnessLossFunction`, :class:`hep_ml.losses.KnnFlatnessLossFunction`,
:class:`hep_ml.losses.KnnAdaLossFunction`
to fight correlation.
Examples
________
To get uniform prediction in mass for background:
>>> base_tree = DecisionTreeClassifier(max_depth=3)
>>> clf = uBoostClassifier(uniform_features=['mass'], uniform_label=0, base_estimator=base_tree,
>>> train_features=['pt', 'flight_time'])
>>> clf.fit(train_data, train_labels, sample_weight=train_weights)
>>> proba = clf.predict_proba(test_data)
To get uniform prediction in Dalitz variables for signal:
>>> clf = uBoostClassifier(uniform_features=['mass_12', 'mass_23'], uniform_label=1, base_estimator=base_tree,
>>> train_features=['pt', 'flight_time'])
>>> clf.fit(train_data, train_labels, sample_weight=train_weights)
>>> proba = clf.predict_proba(test_data)
"""
# Authors:
# Alex Rogozhnikov <[email protected]>
# Nikita Kazeev <[email protected]>
from six.moves import zip
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.random import check_random_state
from .commonutils import sigmoid_function, map_on_cluster, \
compute_knn_indices_of_same_class, compute_cut_for_efficiency, check_xyw
from . import commonutils
from .metrics_utils import compute_group_efficiencies_by_indices
__author__ = "Alex Rogozhnikov, Nikita Kazeev"
__all__ = ["uBoostBDT", "uBoostClassifier"]
class uBoostBDT(BaseEstimator, ClassifierMixin):
def __init__(self,
uniform_features,
uniform_label,
target_efficiency=0.5,
n_neighbors=50,
subsample=1.0,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
uniforming_rate=1.,
train_features=None,
smoothing=0.0,
random_state=None,
algorithm="SAMME"):
"""
uBoostBDT is AdaBoostClassifier, which is modified to have flat
efficiency of signal (class=1) along some variables.
Efficiency is only guaranteed at the cut,
corresponding to global efficiency == target_efficiency.
Can be used alone, without uBoostClassifier.
:param uniform_features: list of strings, names of variables, along which
flatness is desired
:param uniform_label: int, label of class on which uniformity is desired
(typically 0 for background, 1 for signal).
:param target_efficiency: float, the flatness is obtained at global BDT cut,
corresponding to global efficiency
:param n_neighbors: int, (default=50) the number of neighbours,
which are used to compute local efficiency
:param subsample: float (default=1.0), part of training dataset used
to build each base estimator.
:param base_estimator: classifier, optional (default=DecisionTreeClassifier(max_depth=2))
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
`classes_` and `n_classes_` attributes.
:param n_estimators: integer, optional (default=50)
number of estimators used.
:param learning_rate: float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate``
and ``n_estimators``.
:param uniforming_rate: float, optional (default=1.)
how much do we take into account the uniformity of signal,
there is a trade-off between uniforming_rate and the speed of
uniforming, zero value corresponds to plain AdaBoost
:param train_features: list of strings, names of variables used in
fit/predict. If None, all the variables are used
(including uniform_variables)
:param smoothing: float, (default=0.), used to smooth computing of local
efficiencies, 0.0 corresponds to usual uBoost
:param random_state: int, RandomState instance or None (default None)
Reference
----------
.. [1] J. Stevens, M. Williams 'uBoost: A boosting method for
producing uniform selection efficiencies from multivariate classifiers'
"""
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.uniforming_rate = uniforming_rate
self.uniform_features = uniform_features
self.target_efficiency = target_efficiency
self.n_neighbors = n_neighbors
self.subsample = subsample
self.train_features = train_features
self.smoothing = smoothing
self.uniform_label = uniform_label
self.random_state = random_state
self.algorithm = algorithm
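    # Illustrative stand-alone usage (comments only; the column names below are
    # assumed, mirroring the module docstring example):
    #   clf = uBoostBDT(uniform_features=['mass'], uniform_label=0,
    #                   target_efficiency=0.7, train_features=['pt', 'flight_time'])
    #   clf.fit(train_data, train_labels, sample_weight=train_weights)
    #   proba = clf.predict_proba(test_data)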
def fit(self, X, y, sample_weight=None, neighbours_matrix=None):
"""Build a boosted classifier from the training set (X, y).
:param X: array-like of shape [n_samples, n_features]
:param y: labels, array of shape [n_samples] with 0 and 1.
:param sample_weight: array-like of shape [n_samples] or None
:param neighbours_matrix: array-like of shape [n_samples, n_neighbours],
each row contains indices of signal neighbours
(neighbours should be computed for background too),
if None, this matrix is computed.
:return: self
"""
if self.smoothing < 0:
raise ValueError("Smoothing must be non-negative")
        # substitute the documented default estimator before validating its type,
        # otherwise the default of None could never be used
        if self.base_estimator is None:
            self.base_estimator = DecisionTreeClassifier(max_depth=2)
        if not isinstance(self.base_estimator, BaseEstimator):
            raise TypeError("estimator must be an instance of BaseEstimator")
        if self.n_estimators <= 0:
            raise ValueError("n_estimators must be greater than zero.")
        if self.learning_rate <= 0:
            raise ValueError("learning_rate must be greater than zero")
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator, 'predict_proba'):
raise TypeError(
"uBoostBDT with algorithm='SAMME.R' requires "
"that the weak learner have a predict_proba method.\n"
"Please change the base estimator or set algorithm='SAMME' instead.")
assert np.in1d(y, [0, 1]).all(), \
"only two-class classification is implemented, with labels 0 and 1"
self.signed_uniform_label = 2 * self.uniform_label - 1
if neighbours_matrix is not None:
assert np.shape(neighbours_matrix) == (len(X), self.n_neighbors), \
"Wrong shape of neighbours_matrix"
self.knn_indices = neighbours_matrix
else:
assert self.uniform_features is not None, \
"uniform_variables should be set"
self.knn_indices = compute_knn_indices_of_same_class(
X.ix[:, self.uniform_features], y, self.n_neighbors)
sample_weight = commonutils.check_sample_weight(y, sample_weight=sample_weight, normalize=True)
assert np.all(sample_weight >= 0.), 'the weights should be non-negative'
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = []
# score cuts correspond to
# global efficiency == target_efficiency on each iteration.
self.score_cuts_ = []
x_train_features = self._get_train_features(X)
x_train_features, y, sample_weight = check_xyw(x_train_features, y, sample_weight)
self.random_state_ = check_random_state(self.random_state)
self._boost(x_train_features, y, sample_weight)
self.score_cut = self.signed_uniform_label * compute_cut_for_efficiency(
self.target_efficiency, y == self.uniform_label, self.decision_function(X) * self.signed_uniform_label)
assert np.allclose(self.score_cut, self.score_cuts_[-1], rtol=1e-10, atol=1e-10), \
"score cut doesn't appear to coincide with the staged one"
assert len(self.estimators_) == len(self.estimator_weights_) == len(self.score_cuts_)
return self
def _make_estimator(self):
estimator = clone(self.base_estimator)
# self.estimators_.append(estimator)
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
return estimator
def _estimator_score(self, estimator, X):
if self.algorithm == "SAMME":
return 2 * estimator.predict(X) - 1.
else:
p = estimator.predict_proba(X)
p[p <= 1e-5] = 1e-5
return np.log(p[:, 1] / p[:, 0])
@staticmethod
def _normalize_weight(y, weight):
        # frequently the algorithm assigns very large weights to signal events
        # compared to background ones (or vice versa, if we want to be uniform in background)
return commonutils.check_sample_weight(y, sample_weight=weight, normalize=True, normalize_by_class=True)
def _compute_uboost_multipliers(self, sample_weight, score, y):
"""Returns uBoost multipliers to sample_weight and computed global cut"""
signed_score = score * self.signed_uniform_label
signed_score_cut = compute_cut_for_efficiency(self.target_efficiency, y == self.uniform_label, signed_score)
global_score_cut = signed_score_cut * self.signed_uniform_label
local_efficiencies = compute_group_efficiencies_by_indices(signed_score, self.knn_indices, cut=signed_score_cut,
smoothing=self.smoothing)
# pay attention - sample_weight should be used only here
e_prime = np.average(np.abs(local_efficiencies - self.target_efficiency),
weights=sample_weight)
is_uniform_class = (y == self.uniform_label)
# beta = np.log((1.0 - e_prime) / e_prime)
# changed to log(1. / e_prime), otherwise this can lead to the situation
# where beta is negative (which is a disaster).
# Mike (uboost author) said he didn't take that into account.
beta = np.log(1. / e_prime)
boost_weights = np.exp((self.target_efficiency - local_efficiencies) * is_uniform_class *
(beta * self.uniforming_rate))
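        # Worked example (illustrative, taking beta=1 and uniforming_rate=1 with
        # target_efficiency=0.5): an event of the uniform class with local
        # efficiency 0.3 gets exp(0.5 - 0.3) ~= 1.22 (upweighted), while one with
        # local efficiency 0.7 gets exp(-0.2) ~= 0.82 (downweighted).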
return boost_weights, global_score_cut
def _boost(self, X, y, sample_weight):
"""Implement a single boost using the SAMME or SAMME.R algorithm,
which is modified in uBoost way"""
cumulative_score = np.zeros(len(X))
y_signed = 2 * y - 1
for iteration in range(self.n_estimators):
estimator = self._make_estimator()
mask = _generate_subsample_mask(len(X), self.subsample, self.random_state_)
estimator.fit(X[mask], y[mask], sample_weight=sample_weight[mask])
# computing estimator weight
if self.algorithm == 'SAMME':
y_pred = estimator.predict(X)
# Error fraction
estimator_error = np.average(y_pred != y, weights=sample_weight)
estimator_error = np.clip(estimator_error, 1e-6, 1. - 1e-6)
estimator_weight = self.learning_rate * 0.5 * (
np.log((1. - estimator_error) / estimator_error))
score = estimator_weight * (2 * y_pred - 1)
else:
estimator_weight = self.learning_rate * 0.5
score = estimator_weight * self._estimator_score(estimator, X)
# correcting the weights and score according to predictions
sample_weight *= np.exp(- y_signed * score)
sample_weight = self._normalize_weight(y, sample_weight)
cumulative_score += score
uboost_multipliers, global_score_cut = \
self._compute_uboost_multipliers(sample_weight, cumulative_score, y)
sample_weight *= uboost_multipliers
sample_weight = self._normalize_weight(y, sample_weight)
self.score_cuts_.append(global_score_cut)
self.estimators_.append(estimator)
self.estimator_weights_.append(estimator_weight)
# erasing from memory
self.knn_indices = None
def _get_train_features(self, X):
"""Gets the DataFrame and returns only columns
that should be used in fitting / predictions"""
if self.train_features is None:
return X
else:
return X[self.train_features]
def staged_decision_function(self, X):
"""Decision function after each stage of boosting.
Float for each sample, the greater --- the more signal like event is.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with floats.
"""
X = self._get_train_features(X)
score = np.zeros(len(X))
for classifier, weight in zip(self.estimators_, self.estimator_weights_):
score += self._estimator_score(classifier, X) * weight
yield score
def decision_function(self, X):
"""Decision function. Float for each sample, the greater --- the more signal like event is.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with floats
"""
return commonutils.take_last(self.staged_decision_function(X))
def predict(self, X):
"""Predict classes for each sample
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with predicted classes.
"""
return np.array(self.decision_function(X) > self.score_cut, dtype=int)
def predict_proba(self, X):
"""Predict probabilities
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples, n_classes] with probabilities.
"""
return commonutils.score_to_proba(self.decision_function(X))
def staged_predict_proba(self, X):
"""Predicted probabilities for each sample after each stage of boosting.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
for score in self.staged_decision_function(X):
yield commonutils.score_to_proba(score)
def _uboost_predict_score(self, X):
"""Method added specially for uBoostClassifier"""
return sigmoid_function(self.decision_function(X) - self.score_cut,
self.smoothing)
def _uboost_staged_predict_score(self, X):
"""Method added specially for uBoostClassifier"""
for cut, score in zip(self.score_cuts_, self.staged_decision_function(X)):
yield sigmoid_function(score - cut, self.smoothing)
@property
def feature_importances_(self):
"""Return the feature importances for `train_features`.
:return: array of shape [n_features], the order is the same as in `train_features`
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted,"
" call `fit` before `feature_importances_`.")
return sum(tree.feature_importances_ * weight for tree, weight
in zip(self.estimators_, self.estimator_weights_))
def _train_classifier(classifier, X_train_vars, y, sample_weight, neighbours_matrix):
# supplementary function to train separate parts of uBoost on cluster
return classifier.fit(X_train_vars, y,
sample_weight=sample_weight,
neighbours_matrix=neighbours_matrix)
class uBoostClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, uniform_features,
uniform_label,
train_features=None,
n_neighbors=50,
efficiency_steps=20,
n_estimators=40,
base_estimator=None,
subsample=1.0,
algorithm="SAMME",
smoothing=None,
n_threads=1,
random_state=None):
"""uBoost classifier, an algorithm of boosting targeted to obtain
flat efficiency in signal along some variables (e.g. mass).
In principle, uBoost is ensemble of uBoostBDTs. See [1] for details.
Parameters
----------
:param uniform_features: list of strings, names of variables,
along which flatness is desired
:param uniform_label: int,
            the label of the class for which uniformity is desired
:param train_features: list of strings,
names of variables used in fit/predict.
if None, all the variables are used (including uniform_variables)
:param n_neighbors: int, (default=50) the number of neighbours,
which are used to compute local efficiency
        :param n_estimators: integer, optional (default=40)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
:param efficiency_steps: integer, optional (default=20),
How many uBoostBDTs should be trained
(each with its own target_efficiency)
:param base_estimator: object, optional (default=DecisionTreeClassifier(max_depth=2))
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required,
as well as proper `classes_` and `n_classes_` attributes.
:param subsample: float (default =1.) part of training dataset used
to train each base classifier.
:param smoothing: float, default=None, used to smooth computing of
local efficiencies, 0.0 corresponds to usual uBoost,
:param random_state: int, RandomState instance or None, (default=None)
:param n_threads: int, number of threads used.
Reference
----------
.. [1] J. Stevens, M. Williams 'uBoost: A boosting method
for producing uniform selection efficiencies from multivariate classifiers'
"""
self.uniform_features = uniform_features
self.uniform_label = uniform_label
self.knn = n_neighbors
self.efficiency_steps = efficiency_steps
self.random_state = random_state
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.subsample = subsample
self.train_features = train_features
self.smoothing = smoothing
self.n_threads = n_threads
self.algorithm = algorithm
def _get_train_features(self, X):
if self.train_features is not None:
return X[self.train_features]
else:
return X
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set.
        :param X: data, pandas.DataFrame of shape [n_samples, n_features]
:param y: labels, array of shape [n_samples] with 0 and 1.
The target values (integers that correspond to classes).
:param sample_weight: array-like of shape [n_samples] with weights or None
:return: self
"""
if self.uniform_features is None:
raise ValueError("Please set uniform variables")
if len(self.uniform_features) == 0:
raise ValueError("The set of uniform variables cannot be empty")
assert np.in1d(y, [0, 1]).all(), \
"only two-class classification is implemented"
if self.base_estimator is None:
self.base_estimator = DecisionTreeClassifier(max_depth=2)
X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight, classification=True)
data_train_features = self._get_train_features(X)
if self.smoothing is None:
self.smoothing = 10. / self.efficiency_steps
neighbours_matrix = compute_knn_indices_of_same_class(
X[self.uniform_features], y, n_neighbours=self.knn)
self.target_efficiencies = np.linspace(0, 1, self.efficiency_steps + 2)[1:-1]
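        # e.g. efficiency_steps=20 keeps the 20 inner points of np.linspace(0, 1, 22),
        # i.e. 1/21, 2/21, ..., 20/21 (roughly 0.048 ... 0.952)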
self.classifiers = []
for efficiency in self.target_efficiencies:
classifier = uBoostBDT(
uniform_features=self.uniform_features,
uniform_label=self.uniform_label,
train_features=None,
target_efficiency=efficiency, n_neighbors=self.knn,
n_estimators=self.n_estimators,
base_estimator=self.base_estimator,
random_state=self.random_state, subsample=self.subsample,
smoothing=self.smoothing, algorithm=self.algorithm)
self.classifiers.append(classifier)
self.classifiers = map_on_cluster('threads-{}'.format(self.n_threads),
_train_classifier,
self.classifiers,
self.efficiency_steps * [data_train_features],
self.efficiency_steps * [y],
self.efficiency_steps * [sample_weight],
self.efficiency_steps * [neighbours_matrix])
return self
def predict(self, X):
"""Predict labels
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: numpy.array of shape [n_samples]
"""
return self.predict_proba(X).argmax(axis=1)
def predict_proba(self, X):
"""Predict probabilities
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples, n_classes] with probabilities.
"""
X = self._get_train_features(X)
score = sum(clf._uboost_predict_score(X) for clf in self.classifiers)
return commonutils.score_to_proba(score / self.efficiency_steps)
def staged_predict_proba(self, X):
"""Predicted probabilities for each sample after each stage of boosting.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
X = self._get_train_features(X)
for scores in zip(*[clf._uboost_staged_predict_score(X) for clf in self.classifiers]):
yield commonutils.score_to_proba(sum(scores) / self.efficiency_steps)
def _generate_subsample_mask(n_samples, subsample, random_generator):
"""
:param float subsample: part of samples to be left
:param random_generator: numpy.random.RandomState instance
"""
assert 0 < subsample <= 1., 'subsample should be in range (0, 1]'
if subsample == 1.0:
mask = slice(None, None, None)
else:
mask = random_generator.uniform(size=n_samples) < subsample
    return mask
| apache-2.0 |
guschmue/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
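  # Quick numeric check (illustrative): desired_auc = 0.75 gives x = 0.5, so True
  # scores ~ U[0.5, 1] and P[T > F] = 1 * 0.5 + 0.5 * 0.5 = 0.75, as intended.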
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
fivejjs/bayespy | bayespy/inference/vmp/nodes/beta.py | 2 | 4372 | ################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
A module for the beta distribution node
"""
import numpy as np
import scipy.special as special
from .dirichlet import (DirichletMoments,
DirichletDistribution,
Dirichlet)
from .node import Moments, ensureparents
class BetaMoments(DirichletMoments):
"""
Class for the moments of beta variables.
"""
def compute_fixed_moments(self, p):
"""
Compute the moments for a fixed value
"""
p = np.asanyarray(p)[...,None] * [1,-1] + [0,1]
return super().compute_fixed_moments(p)
def compute_dims_from_values(self, p):
"""
Return the shape of the moments for a fixed value.
"""
return ( (2,), )
class BetaDistribution(DirichletDistribution):
"""
Class for the VMP formulas of beta variables.
    Although the realizations are scalars (a probability p), the moment vector is
    two-dimensional: [log(p), log(1-p)].
"""
def compute_message_to_parent(self, parent, index, u_self, u_alpha):
"""
Compute the message to a parent node.
"""
return super().compute_message_to_parent(parent, index, u_self, u_alpha)
def compute_phi_from_parents(self, u_alpha, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
return super().compute_phi_from_parents(u_alpha, mask=mask)
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
return super().compute_moments_and_cgf(phi, mask)
def compute_cgf_from_parents(self, u_alpha):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
return super().compute_cgf_from_parents(u_alpha)
def compute_fixed_moments_and_f(self, p, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
p = np.asanyarray(p)[...,None] * [1,-1] + [0,1]
return super().compute_fixed_moments_and_f(p, mask=mask)
def random(self, *phi, plates=None):
"""
Draw a random sample from the distribution.
"""
p = super().random(*phi, plates=plates)
return p[...,0]
class Beta(Dirichlet):
r"""
Node for beta random variables.
The node models a probability variable :math:`p \in [0,1]` as
.. math::
p \sim \mathrm{Beta}(a, b)
where :math:`a` and :math:`b` are prior counts for success and failure,
respectively.
Parameters
----------
alpha : (...,2)-shaped array
Two-element vector containing :math:`a` and :math:`b`
Examples
--------
>>> import warnings
>>> warnings.filterwarnings('ignore', category=RuntimeWarning)
>>> from bayespy.nodes import Bernoulli, Beta
>>> p = Beta([1e-3, 1e-3])
>>> z = Bernoulli(p, plates=(10,))
>>> z.observe([0, 1, 1, 1, 0, 1, 1, 1, 0, 1])
>>> p.update()
>>> import bayespy.plot as bpplt
>>> import numpy as np
>>> bpplt.pdf(p, np.linspace(0, 1, num=100))
[<matplotlib.lines.Line2D object at 0x...>]
"""
_moments = BetaMoments()
_distribution = BetaDistribution()
def __init__(self, alpha, **kwargs):
"""
Create beta node
"""
super().__init__(alpha, **kwargs)
@classmethod
@ensureparents
def _constructor(cls, alpha, **kwargs):
"""
Constructs distribution and moments objects.
"""
D = alpha.dims[0][0]
if D != 2:
raise ValueError("Parent has wrong dimensionality. Must be a "
"two-dimensional vector.")
return super()._constructor(alpha, **kwargs)
def __str__(self):
"""
Print the distribution using standard parameterization.
"""
a = self.phi[0][...,0]
b = self.phi[0][...,1]
return ("%s ~ Beta(a, b)\n"
" a = \n"
"%s\n"
" b = \n"
"%s\n"
% (self.name, a, b))
| mit |
dhruv13J/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
0x0all/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# generate a toy 1-D dataset: a noisy feature X and a binary target y
# (the labels follow a step function of X, with Gaussian noise added to X)
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
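# logistic (sigmoid) function: maps the linear score clf.coef_ * x + clf.intercept_
# to a probability in (0, 1)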
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
aleju/imgaug | imgaug/augmentables/segmaps.py | 2 | 21991 | """Classes dealing with segmentation maps.
E.g. masks, semantic or instance segmentation maps.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
from .. import imgaug as ia
from ..augmenters import blend as blendlib
from .base import IAugmentable
@ia.deprecated(alt_func="SegmentationMapsOnImage",
comment="(Note the plural 'Maps' instead of old 'Map'.)")
def SegmentationMapOnImage(*args, **kwargs):
"""Object representing a segmentation map associated with an image."""
# pylint: disable=invalid-name
return SegmentationMapsOnImage(*args, **kwargs)
class SegmentationMapsOnImage(IAugmentable):
"""
Object representing a segmentation map associated with an image.
Attributes
----------
DEFAULT_SEGMENT_COLORS : list of tuple of int
Standard RGB colors to use during drawing, ordered by class index.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Array representing the segmentation map(s). May have dtypes bool,
int or uint.
shape : tuple of int
Shape of the image on which the segmentation map(s) is/are placed.
**Not** the shape of the segmentation map(s) array, unless it is
identical to the image shape (note the likely difference between the
arrays in the number of channels).
This is expected to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually
being ``3``.
If there is no corresponding image, use ``(H_arr, W_arr)`` instead,
where ``H_arr`` is the height of the segmentation map(s) array
(analogous ``W_arr``).
nb_classes : None or int, optional
Deprecated.
"""
# TODO replace this by matplotlib colormap
DEFAULT_SEGMENT_COLORS = [
(0, 0, 0), # black
(230, 25, 75), # red
(60, 180, 75), # green
(255, 225, 25), # yellow
(0, 130, 200), # blue
(245, 130, 48), # orange
(145, 30, 180), # purple
(70, 240, 240), # cyan
(240, 50, 230), # magenta
(210, 245, 60), # lime
(250, 190, 190), # pink
(0, 128, 128), # teal
(230, 190, 255), # lavender
(170, 110, 40), # brown
(255, 250, 200), # beige
(128, 0, 0), # maroon
(170, 255, 195), # mint
(128, 128, 0), # olive
(255, 215, 180), # coral
(0, 0, 128), # navy
(128, 128, 128), # grey
(255, 255, 255), # white
# --
(115, 12, 37), # dark red
(30, 90, 37), # dark green
(127, 112, 12), # dark yellow
(0, 65, 100), # dark blue
(122, 65, 24), # dark orange
(72, 15, 90), # dark purple
(35, 120, 120), # dark cyan
(120, 25, 115), # dark magenta
(105, 122, 30), # dark lime
(125, 95, 95), # dark pink
(0, 64, 64), # dark teal
(115, 95, 127), # dark lavender
(85, 55, 20), # dark brown
(127, 125, 100), # dark beige
(64, 0, 0), # dark maroon
(85, 127, 97), # dark mint
(64, 64, 0), # dark olive
(127, 107, 90), # dark coral
(0, 0, 64), # dark navy
(64, 64, 64), # dark grey
]
def __init__(self, arr, shape, nb_classes=None):
assert ia.is_np_array(arr), (
"Expected to get numpy array, got %s." % (type(arr),))
assert arr.ndim in [2, 3], (
"Expected segmentation map array to be 2- or "
"3-dimensional, got %d dimensions and shape %s." % (
arr.ndim, arr.shape))
assert isinstance(shape, tuple), (
"Expected 'shape' to be a tuple denoting the shape of the image "
"on which the segmentation map is placed. Got type %s instead." % (
type(shape)))
if arr.dtype.kind == "f":
ia.warn_deprecated(
"Got a float array as the segmentation map in "
"SegmentationMapsOnImage. That is deprecated. Please provide "
"instead a (H,W,[C]) array of dtype bool_, int or uint, where "
"C denotes the segmentation map index."
)
if arr.ndim == 2:
arr = (arr > 0.5)
else: # arr.ndim == 3
arr = np.argmax(arr, axis=2).astype(np.int32)
if arr.dtype.name == "bool":
self._input_was = (arr.dtype, arr.ndim)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
elif arr.dtype.kind in ["i", "u"]:
assert np.min(arr.flat[0:100]) >= 0, (
"Expected segmentation map array to only contain values >=0, "
"got a minimum of %d." % (np.min(arr),))
if arr.dtype.kind == "u":
# allow only <=uint16 due to conversion to int32
assert arr.dtype.itemsize <= 2, (
"When using uint arrays as segmentation maps, only uint8 "
"and uint16 are allowed. Got dtype %s." % (arr.dtype.name,)
)
elif arr.dtype.kind == "i":
# allow only <=uint16 due to conversion to int32
assert arr.dtype.itemsize <= 4, (
"When using int arrays as segmentation maps, only int8, "
"int16 and int32 are allowed. Got dtype %s." % (
arr.dtype.name,)
)
self._input_was = (arr.dtype, arr.ndim)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
else:
raise Exception((
"Input was expected to be an array of dtype 'bool', 'int' "
"or 'uint'. Got dtype '%s'.") % (arr.dtype.name,))
if arr.dtype.name != "int32":
arr = arr.astype(np.int32)
self.arr = arr
self.shape = shape
if nb_classes is not None:
ia.warn_deprecated(
"Providing nb_classes to SegmentationMapsOnImage is no longer "
"necessary and hence deprecated. The argument is ignored "
"and can be safely removed.")
def get_arr(self):
"""Return the seg.map array, with original dtype and shape ndim.
Here, "original" denotes the dtype and number of shape dimensions that
was used when the :class:`SegmentationMapsOnImage` instance was
created, i.e. upon the call of
:func:`SegmentationMapsOnImage.__init__`.
Internally, this class may use a different dtype and shape to simplify
computations.
.. note::
The height and width may have changed compared to the original
input due to e.g. pooling operations.
Returns
-------
ndarray
Segmentation map array.
Same dtype and number of dimensions as was originally used when
the :class:`SegmentationMapsOnImage` instance was created.
"""
input_dtype, input_ndim = self._input_was
# The internally used int32 has a wider value range than any other
# input dtype, hence we can simply convert via astype() here.
arr_input = self.arr.astype(input_dtype)
if input_ndim == 2:
assert arr_input.shape[2] == 1, (
"Originally got a (H,W) segmentation map. Internal array "
"should now have shape (H,W,1), but got %s. This might be "
"an internal error." % (arr_input.shape,))
return arr_input[:, :, 0]
return arr_input
@ia.deprecated(alt_func="SegmentationMapsOnImage.get_arr()")
def get_arr_int(self, *args, **kwargs):
"""Return the seg.map array, with original dtype and shape ndim."""
# pylint: disable=unused-argument
return self.get_arr()
def draw(self, size=None, colors=None):
"""
Render the segmentation map as an RGB image.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
If set to ``None``, no resizing is performed and the size of the
segmentation map array is used.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw.
If ``None``, then default colors will be used.
Returns
-------
list of (H,W,3) ndarray
Rendered segmentation map (dtype is ``uint8``).
One per ``C`` in the original input array ``(H,W,C)``.
"""
def _handle_sizeval(sizeval, arr_axis_size):
if sizeval is None:
return arr_axis_size
if ia.is_single_float(sizeval):
return max(int(arr_axis_size * sizeval), 1)
if ia.is_single_integer(sizeval):
return sizeval
raise ValueError("Expected float or int, got %s." % (
type(sizeval),))
if size is None:
size = [size, size]
elif not ia.is_iterable(size):
size = [size, size]
height = _handle_sizeval(size[0], self.arr.shape[0])
width = _handle_sizeval(size[1], self.arr.shape[1])
image = np.zeros((height, width, 3), dtype=np.uint8)
return self.draw_on_image(
image,
alpha=1.0,
resize="segmentation_map",
colors=colors,
draw_background=True
)
def draw_on_image(self, image, alpha=0.75, resize="segmentation_map",
colors=None, draw_background=False,
background_class_id=0, background_threshold=None):
"""Draw the segmentation map as an overlay over an image.
Parameters
----------
image : (H,W,3) ndarray
Image onto which to draw the segmentation map. Expected dtype
is ``uint8``.
alpha : float, optional
Alpha/opacity value to use for the mixing of image and
segmentation map. Larger values mean that the segmentation map
will be more visible and the image less visible.
resize : {'segmentation_map', 'image'}, optional
In case of size differences between the image and segmentation
map, either the image or the segmentation map can be resized.
This parameter controls which of the two will be resized to the
other's size.
colors : None or list of tuple of int, optional
Colors to use. One for each class to draw.
If ``None``, then default colors will be used.
draw_background : bool, optional
If ``True``, the background will be drawn like any other class.
If ``False``, the background will not be drawn, i.e. the respective
background pixels will be identical with the image's RGB color at
the corresponding spatial location and no color overlay will be
applied.
background_class_id : int, optional
Class id to interpret as the background class.
See `draw_background`.
background_threshold : None, optional
Deprecated.
This parameter is ignored.
Returns
-------
list of (H,W,3) ndarray
Rendered overlays as ``uint8`` arrays.
Always a **list** containing one RGB image per segmentation map
array channel.
"""
if background_threshold is not None:
ia.warn_deprecated(
"The argument `background_threshold` is deprecated and "
"ignored. Please don't use it anymore.")
assert image.ndim == 3, (
"Expected to draw on 3-dimensional image, got image with %d "
"dimensions." % (image.ndim,))
assert image.shape[2] == 3, (
"Expected to draw on RGB image, got image with %d channels "
"instead." % (image.shape[2],))
assert image.dtype.name == "uint8", (
"Expected to get image with dtype uint8, got dtype %s." % (
image.dtype.name,))
assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8, (
"Expected 'alpha' to be in interval [0.0, 1.0], got %.4f." % (
alpha,))
assert resize in ["segmentation_map", "image"], (
"Expected 'resize' to be \"segmentation_map\" or \"image\", got "
"%s." % (resize,))
colors = (
colors
if colors is not None
else SegmentationMapsOnImage.DEFAULT_SEGMENT_COLORS
)
if resize == "image":
image = ia.imresize_single_image(
image, self.arr.shape[0:2], interpolation="cubic")
segmaps_drawn = []
arr_channelwise = np.dsplit(self.arr, self.arr.shape[2])
for arr in arr_channelwise:
arr = arr[:, :, 0]
nb_classes = 1 + np.max(arr)
segmap_drawn = np.zeros((arr.shape[0], arr.shape[1], 3),
dtype=np.uint8)
assert nb_classes <= len(colors), (
"Can't draw all %d classes as it would exceed the maximum "
"number of %d available colors." % (nb_classes, len(colors),))
ids_in_map = np.unique(arr)
for c, color in zip(sm.xrange(nb_classes), colors):
if c in ids_in_map:
class_mask = (arr == c)
segmap_drawn[class_mask] = color
segmap_drawn = ia.imresize_single_image(
segmap_drawn, image.shape[0:2], interpolation="nearest")
segmap_on_image = blendlib.blend_alpha(segmap_drawn, image, alpha)
if draw_background:
mix = segmap_on_image
else:
foreground_mask = ia.imresize_single_image(
(arr != background_class_id),
image.shape[0:2],
interpolation="nearest")
# without this, the merge below does nothing
foreground_mask = np.atleast_3d(foreground_mask)
mix = (
(~foreground_mask) * image
+ foreground_mask * segmap_on_image
)
segmaps_drawn.append(mix)
return segmaps_drawn
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
"""Pad the segmentation maps at their top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the segmentation map.
Must be ``0`` or greater.
right : int, optional
Amount of pixels to add at the right side of the segmentation map.
Must be ``0`` or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the segmentation map.
Must be ``0`` or greater.
left : int, optional
Amount of pixels to add at the left side of the segmentation map.
Must be ``0`` or greater.
mode : str, optional
Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`~imgaug.imgaug.pad` for details.
Returns
-------
imgaug.augmentables.segmaps.SegmentationMapsOnImage
Padded segmentation map with height ``H'=H+top+bottom`` and
width ``W'=W+left+right``.
"""
from ..augmenters import size as iasize
arr_padded = iasize.pad(self.arr, top=top, right=right, bottom=bottom,
left=left, mode=mode, cval=cval)
return self.deepcopy(arr=arr_padded)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0,
return_pad_amounts=False):
"""Pad the segmentation maps until they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the
corresponding sides (left/right or top/bottom) will be padded. In
each case, both of the sides will be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes
the image having twice as much width as height.
mode : str, optional
Padding mode to use.
See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`~imgaug.imgaug.pad` for details.
return_pad_amounts : bool, optional
If ``False``, then only the padded instance will be returned.
If ``True``, a tuple with two entries will be returned, where
the first entry is the padded instance and the second entry are
the amounts by which each array side was padded. These amounts are
again a tuple of the form ``(top, right, bottom, left)``, with
each value being an integer.
Returns
-------
imgaug.augmentables.segmaps.SegmentationMapsOnImage
Padded segmentation map as :class:`SegmentationMapsOnImage`
instance.
tuple of int
Amounts by which the instance's array was padded on each side,
given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to
``True``.
"""
from ..augmenters import size as iasize
arr_padded, pad_amounts = iasize.pad_to_aspect_ratio(
self.arr,
aspect_ratio=aspect_ratio,
mode=mode,
cval=cval,
return_pad_amounts=True)
segmap = self.deepcopy(arr=arr_padded)
if return_pad_amounts:
return segmap, pad_amounts
return segmap
@ia.deprecated(alt_func="SegmentationMapsOnImage.resize()",
comment="resize() has the exactly same interface.")
def scale(self, *args, **kwargs):
"""Resize the seg.map(s) array given a target size and interpolation."""
return self.resize(*args, **kwargs)
def resize(self, sizes, interpolation="nearest"):
"""Resize the seg.map(s) array given a target size and interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
Nearest neighbour interpolation (``"nearest"``) is almost always
the best choice.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
Returns
-------
imgaug.augmentables.segmaps.SegmentationMapsOnImage
Resized segmentation map object.
"""
arr_resized = ia.imresize_single_image(self.arr, sizes,
interpolation=interpolation)
return self.deepcopy(arr_resized)
# TODO how best to handle changes to _input_was due to changed 'arr'?
def copy(self, arr=None, shape=None):
"""Create a shallow copy of the segmentation map object.
Parameters
----------
arr : None or (H,W) ndarray or (H,W,C) ndarray, optional
Optionally the `arr` attribute to use for the new segmentation map
instance. Will be copied from the old instance if not provided.
See
:func:`~imgaug.augmentables.segmaps.SegmentationMapsOnImage.__init__`
for details.
shape : None or tuple of int, optional
            Optionally the shape attribute to use for the new segmentation
map instance. Will be copied from the old instance if not provided.
See
:func:`~imgaug.augmentables.segmaps.SegmentationMapsOnImage.__init__`
for details.
Returns
-------
imgaug.augmentables.segmaps.SegmentationMapsOnImage
Shallow copy.
"""
# pylint: disable=protected-access
segmap = SegmentationMapsOnImage(
self.arr if arr is None else arr,
shape=self.shape if shape is None else shape)
segmap._input_was = self._input_was
return segmap
def deepcopy(self, arr=None, shape=None):
"""Create a deep copy of the segmentation map object.
Parameters
----------
arr : None or (H,W) ndarray or (H,W,C) ndarray, optional
Optionally the `arr` attribute to use for the new segmentation map
instance. Will be copied from the old instance if not provided.
See
:func:`~imgaug.augmentables.segmaps.SegmentationMapsOnImage.__init__`
for details.
shape : None or tuple of int, optional
            Optionally the shape attribute to use for the new segmentation
map instance. Will be copied from the old instance if not provided.
See
:func:`~imgaug.augmentables.segmaps.SegmentationMapsOnImage.__init__`
for details.
Returns
-------
imgaug.augmentables.segmaps.SegmentationMapsOnImage
Deep copy.
"""
# pylint: disable=protected-access
segmap = SegmentationMapsOnImage(
np.copy(self.arr if arr is None else arr),
shape=self.shape if shape is None else shape)
segmap._input_was = self._input_was
return segmap
| mit |
xiaoh/sediFoam | cases/auto-testing/test-cases/multiParticlesCollideRho/particlePosition.py | 2 | 1082 | #!/usr/bin/python
import sys, os, os.path
import matplotlib.pyplot as plt
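# Compare particle trajectories of the current run against the stored benchmark:
# for each of the four particles, grep its positions from snapshot.bubblemd into
# data/p<i>.dat, plot them in black, overlay the reference trajectory from
# data/origin/p<i>.dat in red, and save the comparison figure as a PDF.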
p = [];
for i in [1,2,3,4]:
cmd = 'grep "' + str(i) +' 1" snapshot.bubblemd > data/p' + str(i) + '.dat';
os.system(cmd);
x_p = [];
y_p = [];
x_p_bench = [];
y_p_bench = [];
pData='data/p' + str(i) + '.dat';
fData = open(pData, 'r');
for line in fData.readlines():
data = [x.strip() for x in line.split(None)]
if (not data):
continue
x_p.append(data[4]);
y_p.append(data[5]);
    p.append(plt.plot(x_p, y_p, 'k-o', markersize=4)[0])  # keep the Line2D handle for the legend
pData='data/origin/p' + str(i) + '.dat';
fData = open(pData, 'r');
for line in fData.readlines():
data = [x.strip() for x in line.split(None)]
if (not data):
continue
x_p_bench.append(data[4]);
y_p_bench.append(data[5]);
    p.append(plt.plot(x_p_bench, y_p_bench, 'r-o', markersize=4)[0])  # benchmark curve handle
lg = plt.legend([p[0],p[1]],["current result","benchmark"],loc=4)
lg.draw_frame(False)
plt.savefig('data/multiParticlesPositionRho.pdf');
| gpl-2.0 |
satishgoda/bokeh | bokeh/util/serialization.py | 7 | 6810 | """ Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
objects. This is especally important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
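Example (hedged illustration):
.. code-block:: python
    # increment every int found anywhere in the fragment
    json_apply({'a': [1, 2], 'b': 3},
               lambda frag: isinstance(frag, int),
               lambda frag: frag + 1)
    # -> {'a': [2, 3], 'b': 4}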
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
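Example (hedged illustration):
.. code-block:: python
    transform_array(np.array([1.0, np.nan, np.inf]))
    # -> [1.0, 'NaN', 'Infinity']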
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
    # not quite correct: this truncates datetime64 values to millisecond precision
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
def traverse_data(datum):
"""recursively look for NaN, Infinity, and -Infinity objects
and replace them with JSON-compliant strings
"""
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
datum_copy.append(traverse_data(item))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
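Example (hedged illustration):
.. code-block:: python
    transform_column_source_data({'x': np.array([0.5, np.nan])})
    # -> {'x': [0.5, 'NaN']}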
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/linear_model/ridge.py | 13 | 51357 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
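    # With the thin SVD X = U diag(s) Vt, the ridge solution for target j is
    # w_j = V diag(s / (s**2 + alpha_j)) U^T y_j; the code below evaluates
    # this for every target column, each with its own penalty alpha_j.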
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers all support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
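Examples
--------
A minimal, synthetic sketch (random data, default solver):
>>> import numpy as np
>>> from sklearn.linear_model import ridge_regression
>>> rng = np.random.RandomState(0)
>>> X, y = rng.randn(10, 5), rng.randn(10)
>>> coef = ridge_regression(X, y, alpha=1.0)
>>> coef.shape
(5,)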
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers all support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
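Examples
--------
A minimal, hedged usage sketch on the iris data:
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import RidgeClassifier
>>> iris = load_iris()
>>> clf = RidgeClassifier(alpha=1.0).fit(iris.data, iris.target)
>>> clf.predict(iris.data[:2]).shape
(2,)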
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
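Examples
--------
A small, hedged numerical check of the ``looe = c / diag(G)`` identity
against brute-force leave-one-out refits (synthetic data, no intercept):
>>> import numpy as np
>>> from sklearn.linear_model import Ridge
>>> rng = np.random.RandomState(0)
>>> X, y, alpha = rng.randn(6, 3), rng.randn(6), 1.0
>>> G = np.linalg.inv(X.dot(X.T) + alpha * np.eye(6))
>>> c = G.dot(y)
>>> def loo_residual(i):
...     r = Ridge(alpha=alpha, fit_intercept=False)
...     r.fit(np.delete(X, i, axis=0), np.delete(y, i))
...     return y[i] - r.predict(X[i:i + 1])[0]
>>> np.allclose(c / np.diag(G), [loo_residual(i) for i in range(6)])
True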
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
w[v == 0] = 0
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
# to emulate fit_intercept=True situation, add a column on ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
# detect columns colinear to ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer want an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv,
scoring=self.scoring)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features and X is not sparse,
otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
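Examples
--------
A minimal, hedged usage sketch (synthetic data):
>>> import numpy as np
>>> from sklearn.linear_model import RidgeCV
>>> rng = np.random.RandomState(0)
>>> X, y = rng.randn(20, 3), rng.randn(20)
>>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
>>> float(reg.alpha_) in (0.1, 1.0, 10.0)
True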
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
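Examples
--------
A minimal, hedged usage sketch on the iris data:
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import RidgeClassifierCV
>>> iris = load_iris()
>>> clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0)).fit(iris.data, iris.target)
>>> float(clf.alpha_) in (0.1, 1.0, 10.0)
True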
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| mit |
mne-tools/mne-tools.github.io | 0.19/_downloads/a5d4e64d0843ff17526c0588f9967f97/plot_covariance_whitening_dspm.py | 5 | 6810 | """
===================================================
Demonstrate impact of whitening on source estimates
===================================================
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in [1]_. The example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in [1]_.
This example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
.. warning:: Please do not copy the patterns presented here for your own
analysis; this example is purely illustrative.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
##############################################################################
# Get data
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw = raw.crop(0, 150.).load_data()
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(None, 20.)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(mag=3e-12)
# Make forward
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
del src
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
clim = dict(kind='value', lims=[0, 2.5, 5])
###############################################################################
# Estimate covariances
samples_epochs = 5, 15,
method = 'empirical', 'shrunk'
colors = 'steelblue', 'red'
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
events_ = events_[np.argsort(events_[:, 0])]
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject,
decim=8)
epochs_train.equalize_event_counts(event_ids)
assert len(epochs_train) == 2 * n_train
# We know some of these have too few samples, so suppress warning
# with verbose='error'
noise_covs = compute_covariance(
epochs_train, method=method, tmin=None, tmax=0, # baseline only
return_estimators=True, rank=None, verbose='error') # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
del epochs_train, events_
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs:
inverse_operator = make_inverse_operator(evokeds[0].info, forward,
cov, loose=0.2, depth=0.8)
assert len(inverse_operator['sing']) == 274 # sanity check
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b
del raw, forward # save some memory
##############################################################################
# Show the resulting source estimates
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 5))
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim,
initial_time=0.175, background='w', foreground='k')
brain.show_view('ven')
im = brain.screenshot()
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set(title='{0} epochs'.format(n_train * 2),
xlabel='Time (ms)', ylabel='Source Activation (dSPM)',
xlim=(tmin * 1e3, tmax * 1e3), ylim=(-3, 3))
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.2, left=0.01, right=0.99, wspace=0.03)
| bsd-3-clause |
thilbern/scikit-learn | sklearn/ensemble/tests/test_forest.py | 7 | 30960 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
"""Check consistency on dataset iris."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
"""Check consistency on dataset boston house prices."""
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
"""Regression models should not have a classes_ attribute."""
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
"""Predict probabilities."""
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
"""Check variable importances."""
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_true(0 < X_new.shape[1] < X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
"""Check that oob prediction is a good estimation of the generalization
error."""
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
"""Check pickability."""
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
"""Check estimators on multi-output problems."""
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
"""Test that n_classes_ and classes_ have proper shape."""
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning a dense array.
'''
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning the same array for both argument
values.
'''
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
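    # A sketch of the counting argument (not part of the original comment): the
    # extra-trees threshold at the root is drawn uniformly on [0, 3], so the cut
    # falls in (0, 1), (1, 2) or (2, 3) with probability 1/3 each.  A cut in
    # (1, 2) leaves two 2-valued children whose remaining splits are forced,
    # giving the single compact shape with probability 1/3.  A cut in (0, 1) or
    # (2, 3) leaves one 3-valued child whose next cut picks either of two
    # intervals with probability 1/2, giving 2 + 2 shapes of probability
    # 1/3 * 1/2 = 1/6 each, i.e. five shapes in total.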
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
"""Test precedence of max_leaf_nodes over max_depth. """
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
"""Test if leaves contain more than leaf_count training examples"""
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
"""Check that it works no matter the memory layout"""
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_warm_start(name, random_state=42):
"""Test if fitting incrementally with warm start gives a forest of the
right size and the same results as a normal fit."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
"""Test if fit clears state and grows a new forest when warm_start==False.
"""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
"""Test if warm start second fit with smaller n_estimators raises error."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
"""Test if warm start with equal n_estimators does nothing and returns the
same forest and raises a warning."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
"""Test that the warm start computes oob score when asked."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/sandbox/examples/ex_kaplan_meier.py | 33 | 2838 | #An example for the Kaplan-Meier estimator
from __future__ import print_function
from statsmodels.compat.python import lrange
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print('basic data')
print('\n')
dta = list(dta.values()[-1])
print(dta[lrange(5),:])
print('\n')
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print('basic model')
print('\n')
km.summary()
print('\n')
#Multiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print('more than one curve')
print('\n')
km2.summary()
print('\n')
km2.plot()
#with censoring
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print('with censoring')
print('\n')
print(dta[lrange(5),:])
print('\n')
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print('\n')
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print('log rank test')
print('\n')
print(log_rank)
print('\n')
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
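#As a sketch (hypothetical variable names), those pieces can be unpacked as
#    chi2_stat, dof, p_value = log_rank[0], log_rank[1], log_rank[2]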
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print('Wilcoxon')
print('\n')
print(wilcoxon)
print('\n')
#Same info as log_rank, but for Peto and Peto modification to the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desired weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
    #must accept one argument, even though it is not used here
s = KaplanMeier(dta,0,censoring=2)
s.fit()
s = s.results[0][0]
s = s * (1 - s)
return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print('user specified weights')
print('\n')
print(test)
print('\n')
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print('with nan group names')
print('\n')
print(dta[lrange(5),:])
print('\n')
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print('\n')
km4.plot()
#show all the plots
plt.show()
| bsd-3-clause |
ajaybhat/scikit-image | skimage/feature/tests/test_util.py | 35 | 2818 | import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from numpy.testing import assert_equal, assert_raises
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_prepare_grayscale_input_2D,
_mask_border_keypoints, plot_matches)
def test_feature_detector():
assert_raises(NotImplementedError, FeatureDetector().detect, None)
def test_descriptor_extractor():
assert_raises(NotImplementedError, DescriptorExtractor().extract,
None, None)
def test_prepare_grayscale_input_2D():
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 3, 3)))
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1)))
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1, 1)))
img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
def test_mask_border_keypoints():
keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 0),
[1, 1, 1, 1, 1])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 2),
[0, 0, 1, 1, 1])
assert_equal(_mask_border_keypoints((4, 4), keypoints, 2),
[0, 0, 1, 0, 0])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 5),
[0, 0, 0, 0, 0])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 4),
[0, 0, 0, 0, 1])
@np.testing.decorators.skipif(plt is None)
def test_plot_matches():
fig, ax = plt.subplots(nrows=1, ncols=1)
shapes = (((10, 10), (10, 10)),
((10, 10), (12, 10)),
((10, 10), (10, 12)),
((10, 10), (12, 12)),
((12, 10), (10, 10)),
((10, 12), (10, 10)),
((12, 12), (10, 10)))
keypoints1 = 10 * np.random.rand(10, 2)
keypoints2 = 10 * np.random.rand(10, 2)
idxs1 = np.random.randint(10, size=10)
idxs2 = np.random.randint(10, size=10)
matches = np.column_stack((idxs1, idxs2))
for shape1, shape2 in shapes:
img1 = np.zeros(shape1)
img2 = np.zeros(shape2)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
only_matches=True)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
keypoints_color='r')
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
matches_color='r')
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| bsd-3-clause |
Canas/kaftools | examples/klms_pretrained.py | 1 | 1192 | import matplotlib.pyplot as plt
import numpy as np
from kaftools.filters import KlmsFilter
from kaftools.kernels import GaussianKernel
from kaftools.utils.shortcuts import plot_series, plot_squared_error
from kaftools.sparsifiers import NoveltyCriterion
if __name__ == "__main__":
    # Load data
data = np.load('./data/pretrained_data_lorentz.npz')
# sparsify lorentz : lr(1e-2), novelty(0.99919, 1.0)
# sparsify wind: lr(1e-2), novelty(0.9934, 1.0)
    # Configure KLMS
klms_params = {
'kernel': GaussianKernel(sigma=float(data['sigma_k_post'])),
'learning_rate': 1e-1,
'delay': int(data['delay']),
#'sparsifiers': [NoveltyCriterion(0.99919, 1.0)]
'coefs': data['a_post'],
'dict': data['s_post'].T,
'freeze_dict': True
}
# np.seterr(all='raise')
klms = KlmsFilter(data['y_prog'], data['y_prog'])
klms.fit(**klms_params)
print(len(klms.support_vectors))
plot_series(data['y_prog'], klms.estimate, markersize=1, linewidth=1, figsize=(15, 3))
plot_squared_error(klms.error_history)
import matplotlib.pyplot as plt
#plt.semilogy(np.array(klms.error_history)**2)
#plt.show() | mit |
Statoil/libecl | python/ecl/summary/ecl_sum.py | 1 | 58477 | # Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_sum.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module for loading and querying summary data.
The low-level organisation of summary data is extensively documented
in the C source files ecl_sum.c, ecl_smspec.c and ecl_sum_data in the
libecl/src directory.
"""
import warnings
import numpy
import datetime
import os.path
import ctypes
import pandas
import re
# Observe that there is some convention conflict with the C code
# regarding order of arguments: The C code generally takes the time
# index as the first argument and the key/key_index as second
# argument. In the python code this order has been reversed.
from cwrap import BaseCClass, CFILE
from ecl.util.util import monkey_the_camel
from ecl.util.util import StringList, CTime, DoubleVector, TimeVector, IntVector
from ecl.summary import EclSumTStep
from ecl.summary import EclSumVarType
from ecl.summary.ecl_sum_vector import EclSumVector
from ecl.summary.ecl_smspec_node import EclSMSPECNode
from ecl import EclPrototype, EclUnitTypeEnum
#, EclSumKeyWordVector
#import ecl.ecl_plot.sum_plot as sum_plot
# The date2num function is a verbatim copy of the _to_ordinalf()
# function from the matplotlib.dates module. Inserted here only to
# avoid importing the full matplotlib library. The date2num
# implementation could be replaced with:
#
# from matplotlib.dates import date2num
HOURS_PER_DAY = 24.0
MINUTES_PER_DAY = 60 * HOURS_PER_DAY
SECONDS_PER_DAY = 60 * MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6 * SECONDS_PER_DAY
def date2num(dt):
"""
Convert a python datetime instance to UTC float days.
Convert datetime to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds, return value
is a float. The function is a verbatim copy of the _to_ordinalf()
function from the matplotlib.dates module.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY +
dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY +
dt.microsecond/MUSECONDS_PER_DAY)
return base
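# Quick sanity check (a sketch, not from the original source):
# datetime.datetime(2000, 1, 1, 12) has toordinal() == 730120, so
# date2num() returns 730120.5 for that timestamp.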
class EclSum(BaseCClass):
TYPE_NAME = "ecl_sum"
_fread_alloc_case2 = EclPrototype("void* ecl_sum_fread_alloc_case2__(char*, char*, bool, bool, int)", bind=False)
_fread_alloc = EclPrototype("void* ecl_sum_fread_alloc(char*, stringlist, char*, bool)", bind=False)
_create_restart_writer = EclPrototype("ecl_sum_obj ecl_sum_alloc_restart_writer2(char*, char*, int, bool, bool, char*, time_t, bool, int, int, int)", bind = False)
_create_writer = EclPrototype("ecl_sum_obj ecl_sum_alloc_writer(char*, bool, bool, char*, time_t, bool, int, int, int)", bind = False)
_resample = EclPrototype("ecl_sum_obj ecl_sum_alloc_resample( ecl_sum, char*, time_t_vector, bool, bool)")
_iiget = EclPrototype("double ecl_sum_iget(ecl_sum, int, int)")
_free = EclPrototype("void ecl_sum_free(ecl_sum)")
_data_length = EclPrototype("int ecl_sum_get_data_length(ecl_sum)")
_iget_sim_days = EclPrototype("double ecl_sum_iget_sim_days(ecl_sum, int) ")
_iget_report_step = EclPrototype("int ecl_sum_iget_report_step(ecl_sum, int) ")
_iget_sim_time = EclPrototype("time_t ecl_sum_iget_sim_time(ecl_sum, int) ")
_get_report_end = EclPrototype("int ecl_sum_iget_report_end(ecl_sum, int)")
_get_general_var = EclPrototype("double ecl_sum_get_general_var(ecl_sum, int, char*)")
_get_general_var_index = EclPrototype("int ecl_sum_get_general_var_params_index(ecl_sum, char*)")
_get_general_var_from_sim_days = EclPrototype("double ecl_sum_get_general_var_from_sim_days(ecl_sum, double, char*)")
_get_general_var_from_sim_time = EclPrototype("double ecl_sum_get_general_var_from_sim_time(ecl_sum, time_t, char*)")
_solve_days = EclPrototype("double_vector_obj ecl_sum_alloc_days_solution(ecl_sum, char*, double, bool)")
_solve_dates = EclPrototype("time_t_vector_obj ecl_sum_alloc_time_solution(ecl_sum, char*, double, bool)")
_get_first_gt = EclPrototype("int ecl_sum_get_first_gt(ecl_sum, int, double)")
_get_first_lt = EclPrototype("int ecl_sum_get_first_lt(ecl_sum, int, double)")
_get_start_date = EclPrototype("time_t ecl_sum_get_start_time(ecl_sum)")
_get_end_date = EclPrototype("time_t ecl_sum_get_end_time(ecl_sum)")
_get_last_report_step = EclPrototype("int ecl_sum_get_last_report_step(ecl_sum)")
_get_first_report_step = EclPrototype("int ecl_sum_get_first_report_step(ecl_sum)")
_select_matching_keys = EclPrototype("void ecl_sum_select_matching_general_var_list(ecl_sum, char*, stringlist)")
_has_key = EclPrototype("bool ecl_sum_has_general_var(ecl_sum, char*)")
_check_sim_time = EclPrototype("bool ecl_sum_check_sim_time(ecl_sum, time_t)")
_check_sim_days = EclPrototype("bool ecl_sum_check_sim_days(ecl_sum, double)")
_sim_length = EclPrototype("double ecl_sum_get_sim_length(ecl_sum)")
_get_first_day = EclPrototype("double ecl_sum_get_first_day(ecl_sum)")
_get_data_start = EclPrototype("time_t ecl_sum_get_data_start(ecl_sum)")
_get_unit = EclPrototype("char* ecl_sum_get_unit(ecl_sum, char*)")
_get_restart_case = EclPrototype("ecl_sum_ref ecl_sum_get_restart_case(ecl_sum)")
_get_restart_step = EclPrototype("int ecl_sum_get_restart_step(ecl_sum)")
_get_simcase = EclPrototype("char* ecl_sum_get_case(ecl_sum)")
_get_unit_system = EclPrototype("ecl_unit_enum ecl_sum_get_unit_system(ecl_sum)")
_get_base = EclPrototype("char* ecl_sum_get_base(ecl_sum)")
_get_path = EclPrototype("char* ecl_sum_get_path(ecl_sum)")
_get_abs_path = EclPrototype("char* ecl_sum_get_abs_path(ecl_sum)")
_get_report_step_from_time = EclPrototype("int ecl_sum_get_report_step_from_time(ecl_sum, time_t)")
_get_report_step_from_days = EclPrototype("int ecl_sum_get_report_step_from_days(ecl_sum, double)")
_get_report_time = EclPrototype("time_t ecl_sum_get_report_time(ecl_sum, int)")
_fwrite_sum = EclPrototype("void ecl_sum_fwrite(ecl_sum)")
_can_write = EclPrototype("bool ecl_sum_can_write(ecl_sum)")
_set_case = EclPrototype("void ecl_sum_set_case(ecl_sum, char*)")
_alloc_time_vector = EclPrototype("time_t_vector_obj ecl_sum_alloc_time_vector(ecl_sum, bool)")
_alloc_data_vector = EclPrototype("double_vector_obj ecl_sum_alloc_data_vector(ecl_sum, int, bool)")
_get_var_node = EclPrototype("smspec_node_ref ecl_sum_get_general_var_node(ecl_sum, char*)")
_create_well_list = EclPrototype("stringlist_obj ecl_sum_alloc_well_list(ecl_sum, char*)")
_create_group_list = EclPrototype("stringlist_obj ecl_sum_alloc_group_list(ecl_sum, char*)")
_add_variable = EclPrototype("smspec_node_ref ecl_sum_add_var(ecl_sum, char*, char*, int, char*, double)")
_add_tstep = EclPrototype("ecl_sum_tstep_ref ecl_sum_add_tstep(ecl_sum, int, double)")
_export_csv = EclPrototype("void ecl_sum_export_csv(ecl_sum, char*, stringlist, char*, char*)")
_identify_var_type = EclPrototype("ecl_sum_var_type ecl_sum_identify_var_type(char*)", bind = False)
_is_rate = EclPrototype("bool smspec_node_identify_rate(char*)", bind = False)
_is_total = EclPrototype("bool smspec_node_identify_total(char*, ecl_sum_var_type)", bind = False)
_get_last_value = EclPrototype("double ecl_sum_get_last_value_gen_key(ecl_sum, char*)")
_get_first_value = EclPrototype("double ecl_sum_get_first_value_gen_key(ecl_sum, char*)")
_init_numpy_vector = EclPrototype("void ecl_sum_init_double_vector(ecl_sum, char*, double*)")
_init_numpy_vector_interp = EclPrototype("void ecl_sum_init_double_vector_interp(ecl_sum, char*, time_t_vector, double*)")
_init_numpy_datetime64 = EclPrototype("void ecl_sum_init_datetime64_vector(ecl_sum, int64*, int)")
def __init__(self, load_case, join_string=":", include_restart=True, lazy_load=True, file_options=0):
"""Loads a new EclSum instance with summary data.
Loads a new summary results from the ECLIPSE case given by
argument @load_case; @load_case should be the basename of the ECLIPSE
simulation you want to load. @load_case can contain a leading path
component, and also an extension - the latter will be ignored.
The @join_string is the string used when combining elements
        from the WGNAMES, KEYWORDS and NUMS vectors into a composite
key; with @join_string == ":" the water cut in well OP_1 will
be available as "WWCT:OP_1".
If the @include_restart parameter is set to true the summary
loader will, in the case of a restarted ECLIPSE simulation,
try to load summary results also from the restarted case.
If the @lazy_load parameter is set to true the loader will not load all
the data from a UNSMRY file at creation time, but wait until the data
is actually requested. This will reduce startup time and memory usage,
whereas getting a vector will be slower. When the summary data is split
over multiple CASE.Snnn files all the data will be loaded at
construction time, and the @lazy_load option is ignored. If the
        lazy_load functionality is used the file_options integer flag is passed
when opening the UNSMRY file.
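        A minimal usage sketch (the case name "MY_CASE" is hypothetical):
            sum = EclSum("path/to/MY_CASE")
            wells = sum.wells()
            fopt = sum.numpy_vector("FOPT")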
"""
if not load_case:
raise ValueError('load_case must be the basename of the simulation')
c_pointer = self._fread_alloc_case2(load_case, join_string, include_restart, lazy_load, file_options)
if c_pointer is None:
raise IOError("Failed to create summary instance from argument:%s" % load_case)
super(EclSum, self).__init__(c_pointer)
self._load_case = load_case
@classmethod
def load(cls, smspec_file, unsmry_file, key_join_string = ":", include_restart = True):
if not os.path.isfile( smspec_file ):
raise IOError("No such file: %s" % smspec_file)
if not os.path.isfile( unsmry_file ):
raise IOError("No such file: %s" % unsmry_file )
data_files = StringList( )
data_files.append( unsmry_file )
c_ptr = cls._fread_alloc(smspec_file, data_files, key_join_string, include_restart)
if c_ptr is None:
raise IOError("Failed to create summary instance")
ecl_sum = cls.createPythonObject( c_ptr )
ecl_sum._load_case = smspec_file
return ecl_sum
@classmethod
def createCReference(cls, c_pointer, parent=None):
result = super(EclSum, cls).createCReference(c_pointer, parent)
return result
@classmethod
def createPythonObject(cls, c_pointer):
result = super(EclSum, cls).createPythonObject(c_pointer)
return result
@staticmethod
def var_type(keyword):
return EclSum._identify_var_type(keyword)
@staticmethod
def is_rate(keyword):
return EclSum._is_rate(keyword)
@staticmethod
def is_total(keyword):
return EclSum._is_total(keyword, EclSum.var_type(keyword))
@staticmethod
def writer(case,
start_time,
nx,ny,nz,
fmt_output=False,
unified=True,
time_in_days=True,
key_join_string=":"):
"""
The writer is not generally usable.
@rtype: EclSum
"""
start = CTime(start_time)
smry = EclSum._create_writer(case,
fmt_output,
unified,
key_join_string,
start,
time_in_days,
nx,
ny,
nz)
smry._load_case = 'writer'
return smry
@staticmethod
def restart_writer(case,
restart_case,
restart_step,
start_time,
nx,ny,nz,
fmt_output=False,
unified=True,
time_in_days=True,
key_join_string=":"):
"""
The writer is not generally usable.
@rtype: EclSum
"""
start = CTime(start_time)
smry = EclSum._create_restart_writer(case,
restart_case,
restart_step,
fmt_output,
unified,
key_join_string,
start,
time_in_days,
nx,
ny,
nz)
smry._load_case = 'restart_writer'
return smry
def add_variable(self, variable, wgname=None, num=0, unit="None", default_value=0):
return self._add_variable(variable, wgname, num, unit, default_value).setParent(parent=self)
def add_t_step(self, report_step, sim_days):
""" @rtype: EclSumTStep """
# report_step int
if not isinstance(report_step, int):
raise TypeError('Parameter report_step should be int, was %r' % report_step)
try:
float(sim_days)
except TypeError:
raise TypeError('Parameter sim_days should be float, was %r' % sim_days)
sim_seconds = sim_days * 24 * 60 * 60
tstep = self._add_tstep(report_step, sim_seconds).setParent(parent=self)
return tstep
def get_vector(self, key, report_only=False):
"""
Will return EclSumVector according to @key.
Will raise exception KeyError if the summary object does not
have @key.
"""
warnings.warn("The method get_vector() has been deprecated, use numpy_vector() instead", DeprecationWarning)
self.assertKeyValid(key)
if report_only:
return EclSumVector(self, key, report_only=True)
else:
return EclSumVector(self, key)
def report_index_list(self):
"""
Internal function for working with report_steps.
"""
first_report = self.first_report
last_report = self.last_report
index_list = IntVector()
for report_step in range(first_report, last_report + 1):
time_index = self._get_report_end(report_step)
index_list.append(time_index)
return index_list
def wells(self, pattern=None):
"""
Will return a list of all the well names in case.
If the pattern variable is different from None only wells
matching the pattern will be returned; the matching is based
on fnmatch(), i.e. shell style wildcards.
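        Example (a sketch; well names are case specific): ecl_sum.wells("OP_*")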
"""
return self._create_well_list(pattern)
def groups(self, pattern=None):
"""
Will return a list of all the group names in case.
If the pattern variable is different from None only groups
matching the pattern will be returned; the matching is based
on fnmatch(), i.e. shell style wildcards.
"""
return self._create_group_list(pattern)
def get_values(self, key, report_only=False):
"""
Will return numpy vector of all values according to @key.
If the optional argument report_only is true only the values
corresponding to report steps are included. The method is
also available as the 'values' property of an EclSumVector
instance.
"""
warnings.warn("The method get_values() has been deprecated - use numpy_vector() instead.", DeprecationWarning)
if self.has_key(key):
key_index = self._get_general_var_index(key)
if report_only:
index_list = self.report_index_list()
values = numpy.zeros(len(index_list))
for i in range(len(index_list)):
time_index = index_list[i]
values[i] = self._iiget(time_index, key_index)
else:
length = self._data_length()
values = numpy.zeros(length)
for i in range(length):
values[i] = self._iiget(i, key_index)
return values
else:
raise KeyError("Summary object does not have key:%s" % key)
def _make_time_vector(self, time_index):
time_points = TimeVector()
for t in time_index:
time_points.append(t)
return time_points
def numpy_vector(self, key, time_index=None, report_only=False):
"""Will return numpy vector of all the values corresponding to @key.
The optional argument @time_index can be used to limit the time points
where you want evaluation. The time_index argument should be a list of
datetime instances. The values will be interpolated to the time points
        given in the time_index vector. If the time points in the time_index
vector are outside of the simulated range you will get an extrapolated
value:
Rates -> 0
Not rate -> first or last simulated value.
The function will raise KeyError if the requested key does not exist.
If many keys are needed it will be faster to use the pandas_frame()
function.
        If you set the optional argument report_only to True you will only
get values at the report dates. Observe that passing report_only=True
can not be combined with a value for time_index, that will give you a
ValueError exception.
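        A small sketch (assumes the case contains an "FOPT" vector):
            fopt = ecl_sum.numpy_vector("FOPT")
            monthly = ecl_sum.time_range(interval="1M")
            fopt_monthly = ecl_sum.numpy_vector("FOPT", time_index=monthly)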
"""
if key not in self:
raise KeyError("No such key:%s" % key)
if report_only:
if time_index is None:
time_index = self.report_dates
else:
raise ValueError("Can not suuply both time_index and report_only=True")
if time_index is None:
np_vector = numpy.zeros(len(self))
self._init_numpy_vector(key ,np_vector.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
return np_vector
else:
time_vector = self._make_time_vector(time_index)
np_vector = numpy.zeros(len(time_vector))
self._init_numpy_vector_interp(key, time_vector, np_vector.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
return np_vector
@property
def numpy_dates(self):
"""
Will return numpy vector of numpy.datetime64() values for all the simulated timepoints.
"""
np_dates = numpy.zeros(len(self), dtype="datetime64[ms]")
self._init_numpy_datetime64(np_dates.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)), 1000)
return np_dates
@property
def dates(self):
"""
Will return ordinary Python list of datetime.datetime() objects of simulated timepoints.
"""
np_dates = self.numpy_dates
return np_dates.tolist()
@property
def report_dates(self):
dates = []
if len(self):
for report in range(self.first_report,self.last_report + 1):
dates.append(self.get_report_time( report ))
return dates
def pandas_frame(self, time_index = None, column_keys = None):
"""Will create a pandas frame with summary data.
By default you will get all time points in the summary case, but by
using the time_index argument you can control which times you are
interested in. If you have supplied a time_index argument the data will
be interpolated to these time values. If the time points in the
time_index vector are outside of the simulated range you will get an
extrapolated value:
Rates -> 0
Not rate -> first or last simulated value.
By default the frame will contain all the summary vectors in the case,
but this can be controlled by using the column_keys argument. The
column_keys should be a list of strings, and each summary vector
matching one of the elements in the @column_keys will get a column in
the frame, you can use wildcards like "WWCT:*" and "*:OP". If you
supply a column_keys argument which does not resolve to any valid
summary keys you will get a ValueError exception.
sum = EclSum(case)
monthly_dates = sum.time_range(interval="1M")
data = sum.pandas_frame(time_index = monthly_dates, column_keys=["F*PT"])
FOPT FGPT FWPT
2010-01-01 100.7 200.0 25.0
2010-02-01 150.7 275.0 67.6
2010-03-01 276.7 310.6 67.0
2010-04-01 672.7 620.4 78.7
....
"""
from ecl.summary import EclSumKeyWordVector
if column_keys is None:
keywords = EclSumKeyWordVector(self, add_keywords = True)
else:
keywords = EclSumKeyWordVector(self)
for key in column_keys:
keywords.add_keywords(key)
if len(keywords) == 0:
raise ValueError("No valid key")
if time_index is None:
time_index = self.numpy_dates
data = numpy.zeros([len(time_index), len(keywords)])
EclSum._init_pandas_frame(self, keywords,data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
else:
time_points = self._make_time_vector(time_index)
data = numpy.zeros([len(time_points), len(keywords)])
EclSum._init_pandas_frame_interp(self, keywords, time_points, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
frame = pandas.DataFrame(index = time_index, columns=list(keywords), data=data)
return frame
@staticmethod
def _compile_headers_list(headers, dims):
var_list = []
for key in headers:
lst = re.split(':', key)
kw = lst[0]
wgname = None
            num = 0
unit = "UNIT"
if len(lst) > 1:
nums = []
if lst[1][0].isdigit():
nums = re.split(',', lst[1])
else:
wgname = lst[1]
if len(lst) == 3:
nums = re.split(",", lst[2])
if len(nums) == 3:
i = int(nums[0])-1
j = int(nums[1])-1
k = int(nums[2])-1
if dims is None:
raise ValueError("For key %s When using indexing i,j,k you must supply a valid value for the dims argument" % key)
num = i + j * dims[0] + k * dims[0]*dims[1] + 1
elif len(nums) == 1:
num = int(nums[0])
var_list.append( [kw, wgname, num, unit] )
return var_list
@classmethod
def from_pandas(cls, case, frame, dims = None, headers = None):
start_time = frame.index[0]
var_list = []
if headers is None:
header_list = EclSum._compile_headers_list( frame.columns.values, dims )
else:
header_list = EclSum._compile_headers_list( headers, dims )
if dims is None:
            dims = [1, 1, 1]
ecl_sum = EclSum.writer(case,
start_time.to_pydatetime(),
dims[0], dims[1], dims[2])
for kw, wgname, num, unit in header_list:
var_list.append( ecl_sum.addVariable( kw , wgname = wgname , num = num, unit =unit).getKey1() )
for i, time in enumerate(frame.index):
days = (time - start_time).days
t_step = ecl_sum.addTStep( i+1 , days )
for var in var_list:
t_step[var] = frame.iloc[i][var]
return ecl_sum
def get_key_index(self, key):
"""
Lookup parameter index of @key.
All the summary keys identified in the SMSPEC file have a
corresponding index which is used internally. This function
will return that index for input key @key, this can then be
used in subsequent calls to e.g. the iiget() method. This is a
minor optimization in the case of many lookups of the same
key:
sum = ecl.EclSum(case)
key_index = sum.get_key_index(key)
for time_index in range(sum.length):
value = sum.iiget(time_index, key_index)
        Quite low-level function, should probably rather use an
EclSumVector based function?
"""
index = self._get_general_var_index(key)
if index >= 0:
return index
else:
return None
def last_value(self, key):
"""
Will return the last value corresponding to @key.
Typically useful to get the total production at end of
simulation:
total_production = sum.last_value("FOPT")
The alternative method 'last' will return a EclSumNode
instance with some extra time related information.
"""
if not key in self:
raise KeyError("No such key:%s" % key)
return self._get_last_value(key)
def first_value(self, key):
"""
Will return first value corresponding to @key.
"""
if not key in self:
raise KeyError("No such key:%s" % key)
return self._get_first_value(key)
def get_last_value(self,key):
warnings.warn("The function get_last_value() is deprecated, use last_value() instead",DeprecationWarning)
return self.last_value(key)
def get_last(self, key):
"""
Will return the last EclSumNode corresponding to @key.
If you are only interested in the final value, you can use the
last_value() method.
"""
return self[key].last
def iiget(self, time_index, key_index):
"""
Lookup a summary value based on naive @time_index and
@key_index.
The iiget() method will lookup a summary value based on the
'time' value give by @time_index (i.e. naive counting of
time steps starting at zero), and a key index given by
@key_index. The @key_index value will typically be obtained
with the get_key_index() method first.
This is a quite low level function, in most cases it will be
natural to go via e.g. an EclSumVector instance.
"""
return self._iiget(time_index, key_index)
def iget(self, key, time_index):
"""
Lookup summary value based on @time_index and key.
The @time_index value should be an integer [0,num_steps) and
@key should be string key. To get all the water cut values
from a well:
for time_index in range(sum.length):
wwct = sum.iget(time_index, "WWCT:W5")
This is a quite low level function, in most cases it will be
natural to go via e.g. an EclSumVector instance.
"""
return self._get_general_var(time_index, key)
def __len__(self):
"""
The number of timesteps in the dataset; the return when evaluating
len(case).
"""
return self._data_length()
def __contains__(self, key):
if self._has_key(key):
return True
else:
return False
def assert_key_valid(self, key):
if not key in self:
raise KeyError("The summary key:%s was not recognized" % key)
def __iter__(self):
return iter(self.keys())
def __getitem__(self, key):
"""
Implements [] operator - @key should be a summary key.
The returned value will be a EclSumVector instance.
"""
warnings.warn("The method the [] operator will change behaviour in the future. It will then return a plain numpy vector. You are advised to change to use the numpy_vector() method right away", DeprecationWarning)
return self.get_vector(key)
def scale_vector(self, key, scalar):
msg = """The function EclSum.scale_vector has been removed. As an alternative you
are advised to fetch vector as a numpy vector and then scale that yourself:
vec = ecl_sum.numpy_vector(key)
vec *= scalar
"""
raise NotImplementedError(msg)
def shift_vector(self, key, addend):
msg = """The function EclSum.shift_vector has been removed. As an alternative you
are advised to fetch vector as a numpy vector and then shift that yourself:
    vec = ecl_sum.numpy_vector(key)
    vec += addend
"""
raise NotImplementedError(msg)
def check_sim_time(self, date):
"""
Will check if the input date is in the time span [sim_start, sim_end].
"""
if not isinstance(date, CTime):
date = CTime(date)
return self._check_sim_time(date)
def get_interp_direct(self,key, date):
if not isinstance(date, CTime):
date = CTime(date)
return self._get_general_var_from_sim_time(date, key)
def get_interp(self, key, days=None, date=None):
"""
Will lookup vector @key at time given by @days or @date.
        Requires exactly one input argument @days or @date; will raise
exception ValueError if this is not satisfied.
The method will check that the time argument is within the
        time limits of the simulation; otherwise the method will raise
exception ValueError.
Also available as method get_interp() on the EclSumVector
class.
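        Example (a sketch; the key and time values are hypothetical):
            ecl_sum.get_interp("FOPT", days=100)
            ecl_sum.get_interp("FOPT", date=datetime.date(2010, 1, 1))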
"""
self.assertKeyValid(key)
if days is None and date is None:
raise ValueError("Must supply either days or date")
if days is None:
t = CTime(date)
if self.check_sim_time(t):
return self._get_general_var_from_sim_time(t, key)
else:
raise ValueError("date:%s is outside range of simulation data" % date)
elif date is None:
if self._check_sim_days(days):
return self._get_general_var_from_sim_days(days, key)
else:
raise ValueError("days:%s is outside range of simulation: [%g,%g]" % (days, self.first_day, self.sim_length))
else:
raise ValueError("Must supply either days or date")
def get_interp_row(self, key_list, sim_time, invalid_value = -1):
ctime = CTime(sim_time)
data = DoubleVector( initial_size = len(key_list) , default_value = invalid_value)
EclSum._get_interp_vector(self, ctime, key_list, data)
return data
def time_range(self, start=None, end=None, interval="1Y", num_timestep = None, extend_end=True):
"""Will create a vector of timepoints based on the current case.
By default the timepoints will be regularly sampled based on the
interval given by the @interval string. Alternatively the total number
        of timesteps can be specified; if the @num_timestep option is given
        it takes precedence.
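        Example (a sketch): regularly spaced points over the simulated span:
            monthly = ecl_sum.time_range(interval="1M")
            yearly = ecl_sum.time_range(interval="1Y", extend_end=False)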
"""
(num, timeUnit) = TimeVector.parseTimeUnit(interval)
if start is None:
start = self.getDataStartTime()
else:
if isinstance(start, datetime.date):
start = datetime.datetime(start.year, start.month, start.day, 0, 0, 0)
if start < self.getDataStartTime():
start = self.getDataStartTime()
if end is None:
end = self.getEndTime()
else:
if isinstance(end, datetime.date):
end = datetime.datetime(end.year, end.month, end.day, 0, 0, 0)
if end > self.getEndTime():
end = self.getEndTime()
if end < start:
raise ValueError("Invalid time interval start after end")
if not num_timestep is None:
return TimeVector.create_linear(CTime(start), CTime(end), num_timestep)
range_start = start
range_end = end
if not timeUnit == "d":
year1 = start.year
year2 = end.year
month1 = start.month
month2 = end.month
day1 = start.day
day2 = end.day
if extend_end:
if timeUnit == 'm':
if day2 > 1:
month2 += 1
if month2 == 13:
year2 += 1
month2 = 1
elif timeUnit == "y":
month1 = 1
if year2 > 1 or day2 > 1:
year2 += 1
month2 = 1
day1 = 1
day2 = 1
range_start = datetime.date(year1, month1, day1)
range_end = datetime.date(year2, month2, day2)
trange = TimeVector.createRegular(range_start, range_end, interval)
# If the simulation does not start at the first of the month
# the start value will be before the simulation start; we
# manually shift the first element in the trange to the start
# value; the same for the end of list.
if trange[-1] < end:
if extend_end:
trange.appendTime(num, timeUnit)
else:
trange.append(end)
data_start = self.getDataStartTime()
if trange[0] < data_start:
trange[0] = CTime(data_start)
return trange
def blocked_production(self, totalKey, timeRange):
node = self.smspec_node(totalKey)
if node.isTotal():
total = DoubleVector()
for t in timeRange:
if t < CTime(self.start_time):
total.append(0)
elif t >= CTime(self.end_time):
total.append(self.last_value(totalKey))
else:
total.append(self.get_interp(totalKey, date=t))
tmp = total << 1
total.pop()
return tmp - total
else:
raise TypeError("The blockedProduction method must be called with one of the TOTAL keys like e.g. FOPT or GWIT")
def get_report(self, date=None, days=None):
"""
Will return the report step corresponding to input @date or @days.
If the input argument does not correspond to any report steps
the function will return -1. Observe that the function
requires strict equality.
"""
        if date is not None:
            if days is not None:
                raise ValueError("Must supply either days or date")
            step = self._get_report_step_from_time(CTime(date))
        elif days is not None:
            step = self._get_report_step_from_days(days)
        else:
            raise ValueError("Must supply either days or date")
        return step
def get_report_time(self, report):
"""
Will return the datetime corresponding to the report_step @report.
"""
return CTime(self._get_report_time(report)).date()
def get_interp_vector(self, key, days_list=None, date_list=None):
"""
Will return numpy vector with interpolated values.
        Requires exactly one input argument, @days_list or @date_list; will
        raise a ValueError exception if this is not satisfied.
The method will check that the time arguments are within the
        time limits of the simulation; otherwise the method will raise
        a ValueError exception.
Also available as method get_interp_vector() on the
EclSumVector class.
"""
self.assertKeyValid(key)
if days_list:
if date_list:
raise ValueError("Must supply either days_list or date_list")
else:
vector = numpy.zeros(len(days_list))
sim_length = self.sim_length
sim_start = self.first_day
index = 0
for days in days_list:
if (days >= sim_start) and (days <= sim_length):
vector[index] = self._get_general_var_from_sim_days(days, key)
else:
raise ValueError("Invalid days value")
index += 1
elif date_list:
start_time = self.data_start
end_time = self.end_date
vector = numpy.zeros(len(date_list))
index = 0
for date in date_list:
ct = CTime(date)
if start_time <= ct <= end_time:
vector[index] = self._get_general_var_from_sim_time(ct, key)
else:
raise ValueError("Invalid date value")
index += 1
else:
raise ValueError("Must supply either days_list or date_list")
return vector
def get_from_report(self, key, report_step):
"""
Return summary value of @key at time @report_step.
"""
time_index = self._get_report_end(report_step)
return self._get_general_var(time_index, key)
def has_key(self, key):
"""
Check if summary object has key @key.
"""
return key in self
def smspec_node(self, key):
"""
Will return a EclSMSPECNode instance corresponding to @key.
The returned EclSMPECNode instance can then be used to ask for
various properties of the variable; i.e. if it is a rate
variable, what is the unit, if it is a total variable and so
on.
"""
if self.has_key(key):
node = self._get_var_node(key).setParent(self)
return node
else:
raise KeyError("Summary case does not have key:%s" % key)
def unit(self, key):
"""
Will return the unit of @key.
"""
node = self.smspec_node(key)
return node.unit
@property
def unit_system(self):
"""
Will return the unit system in use for this case.
"""
return self._get_unit_system()
@property
def case(self):
"""
Will return the case name of the current instance - optionally including path.
"""
return self._get_simcase()
@property
def restart_step(self):
"""
Will return the report step this case has been restarted from, or -1.
"""
return self._get_restart_step()
@property
def restart_case(self):
restart_case = self._get_restart_case()
if restart_case:
restart_case.setParent(parent=self)
return restart_case
@property
def path(self):
"""
Will return the path to the current case. Will be None for
case in CWD. See also abs_path.
"""
return self._get_path()
@property
def base(self):
"""
Will return the basename of the current case - no path.
"""
return self._get_base()
@property
def abs_path(self):
"""
Will return the absolute path to the current case.
"""
return self._get_abs_path()
#-----------------------------------------------------------------
# Here comes functions for getting vectors of the time
# dimension. All the get_xxx() functions have an optional boolean
# argument @report_only. If this argument is set to True the
# functions will return time vectors only corresponding to the
# report times.
#
# In addition to the get_xxx() methods there are properties with
# the same name (excluding the 'get'); these properties correspond
    # to a get_xxx() invocation with the optional argument report_only
    # set to False (i.e. the default).
@property
def days(self):
"""
Will return a numpy vector of simulations days.
"""
return self.get_days(False)
def get_days(self, report_only=False):
"""
Will return a numpy vector of simulations days.
If the optional argument @report_only is set to True, only
'days' values corresponding to report steps will be included.
"""
if report_only:
dates = self.report_dates
start_date = self.data_start
start = datetime.date(start_date.year, start_date.month, start_date.day)
return [ (x - start).total_seconds( ) / 86400 for x in dates ]
else:
return [ self._iget_sim_days(index) for index in range(len(self)) ]
def get_dates(self, report_only=False):
"""
Will return a list of simulation dates.
The list will be an ordinary Python list, and the dates will
be in terms ordinary Python datetime values. If the optional
argument @report_only is set to True, only dates corresponding
to report steps will be included.
"""
if report_only:
return self.report_dates
else:
return self.dates
@property
def mpl_dates(self):
"""
Will return a numpy vector of dates ready for matplotlib
The content of the vector are dates in matplotlib format,
i.e. floats - generated by the date2num() function at the top
of this file.
"""
warnings.warn("The mpl_dates property has been deprecated - use numpy_dates instead", DeprecationWarning)
return self.get_mpl_dates(False)
def get_mpl_dates(self, report_only=False):
"""
Will return a numpy vector of dates ready for matplotlib
If the optional argument @report_only is set to True, only
dates values corresponding to report steps will be
included. The content of the vector are dates in matplotlib
format, i.e. floats - generated by the date2num() function at
the top of this file.
"""
warnings.warn("The get_mpl_dates( ) method has been deprecated - use numpy_dates instead", DeprecationWarning)
if report_only:
return [ date2num(dt) for dt in self.report_dates ]
else:
return [date2num(dt) for dt in self.dates]
@property
def report_step(self):
"""
Will return a list of report steps.
The simulator will typically use several simulation timesteps
for each report step, and the number will change between
different report steps. So - assuming that the first report
step one has five simulations timesteps and the next two have
three the report_step vector can look like:
[...,1,1,1,1,1,2,2,2,3,3,3,....]
"""
return self.get_report_step(False)
def get_report_step(self, report_only=False):
if report_only:
report_steps = list(range(self.first_report, self.last_report + 1))
else:
report_steps = []
for index in range(len(self)):
report_steps.append( self._iget_report_step(index) )
return report_steps
#-----------------------------------------------------------------
def iget_days(self, time_index):
"""
Returns the number of simulation days for element nr @time_index.
"""
return self._iget_sim_days(time_index)
def iget_date(self, time_index):
"""
Returns the simulation date for element nr @time_index.
"""
long_time = self._iget_sim_time(time_index)
ct = CTime(long_time)
return ct.datetime()
def iget_report(self, time_index):
"""
Returns the report step corresponding to @time_index.
One report step will in general contain many ministeps.
"""
return self._iget_report_step(time_index)
@property
def length(self):
"""
The number of timesteps in the dataset.
"""
return self._data_length()
@property
def first_day(self):
"""
The first day we have simulation data for; normally 0.
"""
return self._get_first_day()
@property
def sim_length(self):
"""Will return the total time span for the simulation data.
        The length will be returned in the time unit used in the simulation
        data; i.e. typically days.
"""
return self.getSimulationLength()
@property
def start_date(self):
"""
A Python date instance with the start date.
The start time is taken from the SMSPEC file, and in case not
all timesteps have been loaded, e.g. for a restarted case, the
returned start_date might be different from the datetime of
the first (loaded) timestep.
"""
ct = self._get_start_date()
return CTime(ct).date()
@property
def end_date(self):
"""
The date of the last (loaded) time step.
"""
return CTime(self._get_end_date()).date()
@property
def data_start(self):
return self.getDataStartTime()
@property
def end_time(self):
"""
The time of the last (loaded) time step.
"""
return self.getEndTime()
@property
def start_time(self):
return self.getStartTime()
def get_data_start_time(self):
"""The first date we have data for.
        This will mostly be equal to getStartTime(), but in the case
of restarts, where the case we have restarted from is not
found, this time will be later than the true start of the
field.
"""
return CTime(self._get_data_start()).datetime()
def get_start_time(self):
"""
A Python datetime instance with the start time.
See start_date() for further details.
"""
return CTime(self._get_start_date()).datetime()
def get_end_time(self):
"""
A Python datetime instance with the last loaded time.
"""
return CTime(self._get_end_date()).datetime()
def getSimulationLength(self):
"""
The length of the current dataset in simulation days.
Will include the length of a leading restart section,
irrespective of whether we have data for this or not.
"""
return self._sim_length()
@property
def last_report(self):
"""
The number of the last report step in the dataset.
"""
return self._get_last_report_step()
@property
def first_report(self):
"""
The number of the first report step in the dataset.
"""
return self._get_first_report_step()
def first_gt_index(self, key, limit):
"""
Returns the first index where @key is above @limit.
"""
key_index = self._get_general_var_index(key)
time_index = self._get_first_gt(key_index, limit)
return time_index
def first_lt_index(self, key, limit):
"""
Returns the first index where @key is below @limit.
"""
key_index = self._get_general_var_index(key)
time_index = self._get_first_lt(key_index, limit)
return time_index
def first_gt(self, key, limit):
"""
First EclSumNode of @key which is above @limit.
"""
vector = self[key]
return vector.first_gt(limit)
def first_lt(self, key, limit):
"""
First EclSumNode of @key which is below @limit.
"""
vector = self[key]
return vector.first_lt(limit)
def solve_dates(self, key, value, rates_clamp_lower=True):
"""Will solve the equation vector[@key] == value for dates.
See solveDays() for further details.
"""
        if key not in self:
raise KeyError("Unrecognized key:%s" % key)
if len(self) < 2:
raise ValueError("Must have at least two elements to start solving")
return [ x.datetime() for x in self._solve_dates(key, value, rates_clamp_lower)]
def solve_days(self, key, value, rates_clamp_lower=True):
"""Will solve the equation vector[@key] == value.
        This method will find the approximate simulation days where the
        vector @key is equal to @value. The method will return a list,
        which can have zero, one or multiple values:
case = EclSum("CASE")
days = case.solveDays("RPR:2", 200)
if len(days) == 0:
print("Pressure was never equal to 200 BARSA")
elif len(days) == 1:
print("Pressure equal to 200 BARSA after %s simulation days" % days[0])
else:
print("Pressure equal to 200 BARSA multiple times")
for index,day in enumerate(days):
print("Solution[%d] : %s days" % (index, day))
For variables like pressure and total volumes the solution is
based on straightforward linear interpolation between the
simulated values; that is quite intuitive. However - rates is
less intuitive, and how a rate like FOPR is handled can be
surprising:
Fundamentally the simulator works with *volumes*. Assume that
the simulator calculates that between the times t1 and t2 the
total volume of oil produced is V, then the oil production
rate is given as:
FOPR = V / (t2 - t1)
This is the average production rate in the timespan (t1,t2];
the simulator does not have any information on a finer time
scale than this - so the natural assumption is that the
production is constant at this value for the whole time
period. The logical consequence of this is that production
rates should be visualized as a piecewise constant function:
A B
| |
/|\ OPR | |
| \|/ \|/
|
| +============X
| | |
|-------------------------------------------------- X
| | |
| +=============X
| |
| | +===========X
|=========X |
|
+---------+-------------+------------+-----------+-->
t0 t1 t2 t3 t4 time
This figure shows a plot of the OPR as a piecewise constant
function. In a strict mathematical sense the equation:
OPR = X
        does not have a solution at all, but since the inequality:
             OPR(t2) < X < OPR(t3)
        holds, it is natural to say that the equation has a solution. The
default behaviour is to say that the (first) solution in this
case is:
tx = t2 + epsilon
corresponding to the arrow 'A' on the figure. Alternatively if
you set the optional argument 'rates_clamp_lower' to false the
method will find the solution:
tx = t3
        corresponding to the arrow 'B' in the figure.
"""
        if key not in self:
raise KeyError("Unrecognized key:%s" % key)
if len(self) < 2:
raise ValueError("Must have at least two elements to start solving")
return self._solve_days(key, value, rates_clamp_lower)
def keys(self, pattern=None):
"""
Return a StringList of summary keys matching @pattern.
The matching algorithm is ultimately based on the fnmatch()
function, i.e. normal shell-character syntax is used. With
@pattern == "WWCT:*" you will get a list of watercut keys for
all wells.
If pattern is None you will get all the keys of summary
object.
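
        A short illustrative sketch (the patterns are assumptions about
        typical summary keys):

           all_keys = ecl_sum.keys()
           watercut_keys = ecl_sum.keys("WWCT:*")
           field_totals = ecl_sum.keys("F*T")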
"""
s = StringList()
self._select_matching_keys(pattern, s)
return s
def can_write(self):
return self._can_write( )
def fwrite(self, ecl_case=None):
if not self.can_write():
raise NotImplementedError("Write method is not implemented for this case. lazy_load=True??")
if ecl_case:
self._set_case(ecl_case)
self._fwrite_sum()
def alloc_time_vector(self, report_only):
return self._alloc_time_vector(report_only)
def alloc_data_vector(self, data_index, report_only):
return self._alloc_data_vector(data_index, report_only)
def get_general_var_index(self, key):
return self._get_general_var_index(key)
def free(self):
self._free()
def _nicename(self):
"""load_case is often full path to summary file,
if so, output basename, else name
"""
name = self._load_case
if name and os.path.isfile(name):
name = os.path.basename(name)
return name
def __repr__(self):
"""Returns, e.g.
EclSum("NORNE_ATW2013.UNSMRY", [1997-11-06 00:00:00, 2006-12-01 00:00:00], keys=3781) at 0x1609e20
"""
name = self._nicename()
s_time = self.getStartTime()
e_time = self.getEndTime()
num_keys = len(self.keys())
content = 'name="%s", time=[%s, %s], keys=%d' % (name, s_time, e_time, num_keys)
return self._create_repr(content)
def dump_csv_line(self, time, keywords, pfile):
"""
Will dump a csv formatted line of the keywords in @keywords,
        evaluated at the interpolated time @time. @pfile should point to an open Python file handle.
"""
cfile = CFILE(pfile)
ctime = CTime(time)
EclSum._dump_csv_line(self, ctime, keywords, cfile)
def export_csv(self, filename, keys=None, date_format="%Y-%m-%d", sep=";"):
"""Will create a CSV file with summary data.
By default all the vectors in the summary case will be
exported, but by using the optional keys parameter you can
limit the keys which are exported:
ecl_sum = EclSum("CASE")
ecl_sum.exportCSV("case.csv", keys=["W*:OP1", "W*:OP2", "F*T"])
Will export all well related variables for wells 'OP1' and
'OP2' and all total field vectors.
"""
if keys is None:
var_list = self.keys()
else:
var_list = StringList()
for key in keys:
var_list |= self.keys(pattern=key)
self._export_csv(filename, var_list, date_format, sep)
def resample(self, new_case_name, time_points, lower_extrapolation=False, upper_extrapolation=False):
new_case = self._resample(new_case_name, time_points, lower_extrapolation, upper_extrapolation)
if new_case is None:
raise ValueError("Failed to create new resampled case:{}".format(new_case_name))
return new_case
import ecl.summary.ecl_sum_keyword_vector
EclSum._dump_csv_line = EclPrototype("void ecl_sum_fwrite_interp_csv_line(ecl_sum, time_t, ecl_sum_vector, FILE)", bind=False)
EclSum._get_interp_vector = EclPrototype("void ecl_sum_get_interp_vector(ecl_sum, time_t, ecl_sum_vector, double_vector)", bind=False)
EclSum._init_pandas_frame = EclPrototype("void ecl_sum_init_double_frame(ecl_sum, ecl_sum_vector, double*)", bind=False)
EclSum._init_pandas_frame_interp = EclPrototype("void ecl_sum_init_double_frame_interp(ecl_sum, ecl_sum_vector, time_t_vector, double*)", bind=False)
monkey_the_camel(EclSum, 'varType', EclSum.var_type, classmethod)
monkey_the_camel(EclSum, 'addVariable', EclSum.add_variable)
monkey_the_camel(EclSum, 'addTStep', EclSum.add_t_step)
monkey_the_camel(EclSum, 'assertKeyValid', EclSum.assert_key_valid)
monkey_the_camel(EclSum, 'scaleVector', EclSum.scale_vector)
monkey_the_camel(EclSum, 'shiftVector', EclSum.shift_vector)
monkey_the_camel(EclSum, 'timeRange', EclSum.time_range)
monkey_the_camel(EclSum, 'blockedProduction', EclSum.blocked_production)
monkey_the_camel(EclSum, 'getDataStartTime', EclSum.get_data_start_time)
monkey_the_camel(EclSum, 'getStartTime', EclSum.get_start_time)
monkey_the_camel(EclSum, 'getEndTime', EclSum.get_end_time)
monkey_the_camel(EclSum, 'solveDates', EclSum.solve_dates)
monkey_the_camel(EclSum, 'solveDays', EclSum.solve_days)
monkey_the_camel(EclSum, 'dumpCSVLine', EclSum.dump_csv_line)
monkey_the_camel(EclSum, 'exportCSV', EclSum.export_csv)
| gpl-3.0 |
Sentient07/scikit-learn | examples/tree/plot_unveil_tree_structure.py | 13 | 4839 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight into the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows us to retrieve the node indicator functions. A non-zero element
# of the indicator matrix at position (i, j) indicates that sample i goes
# through node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # skip the leaf node reached by this sample; leaves carry no test
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
         X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
pydata/xarray | xarray/coding/times.py | 1 | 19732 | import re
import warnings
from datetime import datetime, timedelta
from distutils.version import LooseVersion
from functools import partial
import numpy as np
import pandas as pd
from pandas.errors import OutOfBoundsDatetime
from ..core import indexing
from ..core.common import contains_cftime_datetimes
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.variable import Variable
from .variables import (
SerializationWarning,
VariableCoder,
lazy_elemwise_func,
pop_to,
safe_setitem,
unpack_for_decoding,
unpack_for_encoding,
)
# standard calendars recognized by cftime
_STANDARD_CALENDARS = {"standard", "gregorian", "proleptic_gregorian"}
_NS_PER_TIME_DELTA = {
"ns": 1,
"us": int(1e3),
"ms": int(1e6),
"s": int(1e9),
"m": int(1e9) * 60,
"h": int(1e9) * 60 * 60,
"D": int(1e9) * 60 * 60 * 24,
}
_US_PER_TIME_DELTA = {
"microseconds": 1,
"milliseconds": 1_000,
"seconds": 1_000_000,
"minutes": 60 * 1_000_000,
"hours": 60 * 60 * 1_000_000,
"days": 24 * 60 * 60 * 1_000_000,
}
_NETCDF_TIME_UNITS_CFTIME = [
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
]
_NETCDF_TIME_UNITS_NUMPY = _NETCDF_TIME_UNITS_CFTIME + ["nanoseconds"]
TIME_UNITS = frozenset(
[
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
"nanoseconds",
]
)
def _is_standard_calendar(calendar):
return calendar.lower() in _STANDARD_CALENDARS
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith("s"):
units = f"{units}s"
return {
"nanoseconds": "ns",
"microseconds": "us",
"milliseconds": "ms",
"seconds": "s",
"minutes": "m",
"hours": "h",
"days": "D",
}[units]
def _ensure_padded_year(ref_date):
# Reference dates without a padded year (e.g. since 1-1-1 or since 2-3-4)
# are ambiguous (is it YMD or DMY?). This can lead to some very odd
    # behaviour e.g. pandas (via dateutil) parses '1-1-1 00:00:0.0' as
# '2001-01-01 00:00:00' (because it assumes a) DMY and b) that year 1 is
# shorthand for 2001 (like 02 would be shorthand for year 2002)).
# Here we ensure that there is always a four-digit year, with the
# assumption being that year comes first if we get something ambiguous.
matches_year = re.match(r".*\d{4}.*", ref_date)
if matches_year:
# all good, return
return ref_date
# No four-digit strings, assume the first digits are the year and pad
# appropriately
matches_start_digits = re.match(r"(\d+)(.*)", ref_date)
if not matches_start_digits:
raise ValueError(f"invalid reference date for time units: {ref_date}")
ref_year, everything_else = [s for s in matches_start_digits.groups()]
ref_date_padded = "{:04d}{}".format(int(ref_year), everything_else)
warning_msg = (
f"Ambiguous reference date string: {ref_date}. The first value is "
"assumed to be the year hence will be padded with zeros to remove "
f"the ambiguity (the padded reference date string is: {ref_date_padded}). "
"To remove this message, remove the ambiguity by padding your reference "
"date strings with zeros."
)
warnings.warn(warning_msg, SerializationWarning)
return ref_date_padded
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace. It also ensures that the year is padded with zeros
# so it will be correctly understood by pandas (via dateutil).
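    # For example (illustrative): "days since 1-1-1" is split into
    # ("days", "0001-1-1") after the reference year has been zero-padded.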
matches = re.match(r"(.+) since (.+)", units)
if not matches:
raise ValueError(f"invalid time units: {units}")
delta_units, ref_date = [s.strip() for s in matches.groups()]
ref_date = _ensure_padded_year(ref_date)
return delta_units, ref_date
def _decode_cf_datetime_dtype(data, units, calendar, use_cftime):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(indexing.as_indexable(data))
example_value = np.concatenate(
[first_n_items(values, 1) or [0], last_item(values) or [0]]
)
try:
result = decode_cf_datetime(example_value, units, calendar, use_cftime)
except Exception:
calendar_msg = (
"the default calendar" if calendar is None else f"calendar {calendar!r}"
)
msg = (
f"unable to decode time units {units!r} with {calendar_msg!r}. Try "
"opening your dataset with decode_times=False or installing cftime "
"if it is not installed."
)
raise ValueError(msg)
else:
dtype = getattr(result, "dtype", np.dtype("object"))
return dtype
def _decode_datetime_with_cftime(num_dates, units, calendar):
import cftime
return np.asarray(
cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True)
)
def _decode_datetime_with_pandas(flat_num_dates, units, calendar):
if not _is_standard_calendar(calendar):
raise OutOfBoundsDatetime(
"Cannot decode times from a non-standard calendar, {!r}, using "
"pandas.".format(calendar)
)
delta, ref_date = _unpack_netcdf_time_units(units)
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning)
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# To avoid integer overflow when converting to nanosecond units for integer
# dtypes smaller than np.int64 cast all integer-dtype arrays to np.int64
# (GH 2002).
if flat_num_dates.dtype.kind == "i":
flat_num_dates = flat_num_dates.astype(np.int64)
# Cast input ordinals to integers of nanoseconds because pd.to_timedelta
# works much faster when dealing with integers (GH 1399).
flat_num_dates_ns_int = (flat_num_dates * _NS_PER_TIME_DELTA[delta]).astype(
np.int64
)
# Use pd.to_timedelta to safely cast integer values to timedeltas,
# and add those to a Timestamp to safely produce a DatetimeIndex. This
# ensures that we do not encounter integer overflow at any point in the
# process without raising OutOfBoundsDatetime.
return (pd.to_timedelta(flat_num_dates_ns_int, "ns") + ref_date).values
def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
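
    A minimal sketch of the expected behaviour (values are illustrative):

        decode_cf_datetime(np.array([0, 1]), "days since 2000-01-01")
        # -> np.datetime64 values for 2000-01-01 and 2000-01-02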
See Also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = "standard"
if use_cftime is None:
try:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
except (KeyError, OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(float), units, calendar
)
if (
dates[np.nanargmin(num_dates)].year < 1678
or dates[np.nanargmax(num_dates)].year >= 2262
):
if _is_standard_calendar(calendar):
warnings.warn(
"Unable to decode time axis into full "
"numpy.datetime64 objects, continuing using "
"cftime.datetime objects instead, reason: dates out "
"of range",
SerializationWarning,
stacklevel=3,
)
else:
if _is_standard_calendar(calendar):
dates = cftime_to_nptime(dates)
elif use_cftime:
dates = _decode_datetime_with_cftime(flat_num_dates, units, calendar)
else:
dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)
return dates.reshape(num_dates.shape)
def to_timedelta_unboxed(value, **kwargs):
if LooseVersion(pd.__version__) < "0.25.0":
result = pd.to_timedelta(value, **kwargs, box=False)
else:
result = pd.to_timedelta(value, **kwargs).to_numpy()
assert result.dtype == "timedelta64[ns]"
return result
def to_datetime_unboxed(value, **kwargs):
if LooseVersion(pd.__version__) < "0.25.0":
result = pd.to_datetime(value, **kwargs, box=False)
else:
result = pd.to_datetime(value, **kwargs).to_numpy()
assert result.dtype == "datetime64[ns]"
return result
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
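
    For example (an illustrative sketch): decode_cf_timedelta([1, 2], "hours")
    returns a timedelta64[ns] array holding one and two hours.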
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
result = to_timedelta_unboxed(num_timedeltas.ravel(), unit=units)
return result.reshape(num_timedeltas.shape)
def _unit_timedelta_cftime(units):
return timedelta(microseconds=_US_PER_TIME_DELTA[units])
def _unit_timedelta_numpy(units):
numpy_units = _netcdf_to_numpy_timeunit(units)
return np.timedelta64(_NS_PER_TIME_DELTA[numpy_units], "ns")
def _infer_time_units_from_diff(unique_timedeltas):
if unique_timedeltas.dtype == np.dtype("O"):
time_units = _NETCDF_TIME_UNITS_CFTIME
unit_timedelta = _unit_timedelta_cftime
zero_timedelta = timedelta(microseconds=0)
timedeltas = unique_timedeltas
else:
time_units = _NETCDF_TIME_UNITS_NUMPY
unit_timedelta = _unit_timedelta_numpy
zero_timedelta = np.timedelta64(0, "ns")
# Note that the modulus operator was only implemented for np.timedelta64
# arrays as of NumPy version 1.16.0. Once our minimum version of NumPy
# supported is greater than or equal to this we will no longer need to cast
# unique_timedeltas to a TimedeltaIndex. In the meantime, however, the
# modulus operator works for TimedeltaIndex objects.
timedeltas = pd.TimedeltaIndex(unique_timedeltas)
for time_unit in time_units:
if np.all(timedeltas % unit_timedelta(time_unit) == zero_timedelta):
return time_unit
return "seconds"
def infer_calendar_name(dates):
"""Given an array of datetimes, infer the CF calendar name"""
if np.asarray(dates).dtype == "datetime64[ns]":
return "proleptic_gregorian"
else:
return np.asarray(dates).ravel()[0].calendar
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = np.asarray(dates).ravel()
if np.asarray(dates).dtype == "datetime64[ns]":
dates = to_datetime_unboxed(dates)
dates = dates[pd.notnull(dates)]
reference_date = dates[0] if len(dates) > 0 else "1970-01-01"
reference_date = pd.Timestamp(reference_date)
else:
reference_date = dates[0] if len(dates) > 0 else "1970-01-01"
reference_date = format_cftime_datetime(reference_date)
unique_timedeltas = np.unique(np.diff(dates))
units = _infer_time_units_from_diff(unique_timedeltas)
return f"{units} since {reference_date}"
def format_cftime_datetime(date):
"""Converts a cftime.datetime object to a string with the format:
YYYY-MM-DD HH:MM:SS.UUUUUU
"""
return "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}".format(
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
date.microsecond,
)
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = to_timedelta_unboxed(np.asarray(deltas).ravel())
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
return _infer_time_units_from_diff(unique_timedeltas)
def cftime_to_nptime(times):
"""Given an array of cftime.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype="M8[ns]")
for i, t in np.ndenumerate(times):
try:
# Use pandas.Timestamp in place of datetime.datetime, because
            # NumPy casts it safely to np.datetime64[ns] for dates outside
# 1678 to 2262 (this is not currently the case for
# datetime.datetime).
dt = pd.Timestamp(
t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond
)
except ValueError as e:
raise ValueError(
"Cannot convert date {} to a date in the "
"standard calendar. Reason: {}.".format(t, e)
)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = "{} since {}".format(delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_cftime(dates, units, calendar):
"""Fallback method for encoding dates using cftime.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
import cftime
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype("M8[us]").astype(datetime)
def encode_datetime(d):
return np.nan if d is None else cftime.date2num(d, units, calendar)
return np.array([encode_datetime(d) for d in dates.ravel()]).reshape(dates.shape)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
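
    A rough sketch of typical output (the inferred units/calendar shown here
    are an assumption, not a guaranteed format):

        num, units, calendar = encode_cf_datetime(
            pd.to_datetime(["2000-01-01", "2000-01-02"]))
        # num -> [0, 1], units -> "days since 2000-01-01 00:00:00",
        # calendar -> "proleptic_gregorian"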
See Also
--------
cftime.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = infer_calendar_name(dates)
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if not _is_standard_calendar(calendar) or dates.dtype.kind == "O":
# parse with cftime instead
raise OutOfBoundsDatetime
assert dates.dtype == "datetime64[ns]"
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype("timedelta64[ns]")
ref_date = pd.Timestamp(ref_date)
# If the ref_date Timestamp is timezone-aware, convert to UTC and
# make it timezone-naive (GH 2649).
if ref_date.tz is not None:
ref_date = ref_date.tz_convert(None)
# Wrap the dates in a DatetimeIndex to do the subtraction to ensure
# an OverflowError is raised if the ref_date is too far away from
# dates to be encoded (GH 2272).
dates_as_index = pd.DatetimeIndex(dates.ravel())
time_deltas = dates_as_index - ref_date
# Use floor division if time_delta evenly divides all differences
# to preserve integer dtype if possible (GH 4045).
if np.all(time_deltas % time_delta == np.timedelta64(0, "ns")):
num = time_deltas // time_delta
else:
num = time_deltas / time_delta
num = num.values.reshape(dates.shape)
except (OutOfBoundsDatetime, OverflowError):
num = _encode_datetime_with_cftime(dates, units, calendar)
num = cast_to_int_if_safe(num)
return (num, units, calendar)
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
num = np.where(pd.isnull(timedeltas), np.nan, num)
num = cast_to_int_if_safe(num)
return (num, units)
class CFDatetimeCoder(VariableCoder):
def __init__(self, use_cftime=None):
self.use_cftime = use_cftime
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if np.issubdtype(data.dtype, np.datetime64) or contains_cftime_datetimes(
variable
):
(data, units, calendar) = encode_cf_datetime(
data, encoding.pop("units", None), encoding.pop("calendar", None)
)
safe_setitem(attrs, "units", units, name=name)
safe_setitem(attrs, "calendar", calendar, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if "units" in attrs and "since" in attrs["units"]:
units = pop_to(attrs, encoding, "units")
calendar = pop_to(attrs, encoding, "calendar")
dtype = _decode_cf_datetime_dtype(data, units, calendar, self.use_cftime)
transform = partial(
decode_cf_datetime,
units=units,
calendar=calendar,
use_cftime=self.use_cftime,
)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding)
class CFTimedeltaCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if np.issubdtype(data.dtype, np.timedelta64):
data, units = encode_cf_timedelta(data, encoding.pop("units", None))
safe_setitem(attrs, "units", units, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if "units" in attrs and attrs["units"] in TIME_UNITS:
units = pop_to(attrs, encoding, "units")
transform = partial(decode_cf_timedelta, units=units)
dtype = np.dtype("timedelta64[ns]")
data = lazy_elemwise_func(data, transform, dtype=dtype)
return Variable(dims, data, attrs, encoding)
| apache-2.0 |
devanshdalal/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
kaichogami/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
Carrotsmile/CS428 | steerstats/tools/plotting/plotVortex.py | 8 | 2777 |
import csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import sys
import scipy
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
import numpy as np
import math
sys.path.append('../../')
from util import writeEntropyMetricData
velocity = 1.33
def f(x):
y=x
return y
def f2(x):
y = math.cos(x)/3.6
return y
def f3(x):
y = math.exp(x*4)
return y
def f4(x):
y = float(x)
y = ((y)) / (y+0.3)
return y
def rotateInXZPlane( dir, angle):
s = math.sin(angle)
c = math.cos(angle)
    # Rotate the vector in the XZ plane; note that the three components must
    # be passed to np.array as a single tuple.
    return np.array(((dir.x * c) + (dir.z * s), dir.y, (dir.z * c) - (dir.x * s)))
distance = 30.9
points = 100.0
fps = 16.0
tocm = 100.0
scale = 10
shift = scale * tocm
seconds = distance/velocity
points = math.floor(seconds*fps)
print "points: " + str(points)
x = np.array(np.arange((points)))/float(points)
x2 = np.array(np.arange(points))/float(points)
print "len x " + str(len(x))
y = np.array(map(f4, x*(math.pi/2.0)))
# y = y / np.amax(y)
# print y
fig = plt.figure()
ax = fig.add_subplot(111)
# x=x*2.0
# y=(y)+x2
# y=y -0.5
# y=y*2.0
points = np.append([x], [y], axis=0).transpose()
ax.plot(points[:,0], points[:,1], linewidth=2.0)
# ax.set_ylim([0.0,1.0])
# ax.set_xlim([-1.0,1.0])
# plt.show()
points = (points-0.5)*2.0*scale*tocm
# print points
distance = 0.0
for i in range(len(points)-1):
distance = distance + np.linalg.norm(points[i]-points[i+1])
print "distance: " + str(distance)
agentData = []
agentData.append(np.array(points))
agentData.append(np.array(points+tocm))
agentData.append(np.array(points+-tocm))
tmp_points = np.array(points *-1.0, copy=True)
agentData.append(np.array(tmp_points))
agentData.append(np.array(tmp_points+tocm))
agentData.append(np.array(tmp_points+-tocm))
points[:,0] = points[:,0] * -1.0
points2= np.array(points, copy=True )
length = len(points2)-1
for i in range(len(points2)):
# point[1] = point[1] + (((shift+point[0])-point[1]))
points[i] = points2[length-i]
agentData.append(np.array(points))
tmp_points = np.array(points, copy=True)
tmp_points[:,0] = tmp_points[:,0] + tocm
tmp_points[:,1] = tmp_points[:,1] - tocm
agentData.append(np.array(tmp_points))
agentData.append(np.array(points + (points - tmp_points)))
points = points *-1.0
agentData.append(np.array(points))
tmp_points = np.array(points, copy=True)
tmp_points[:,0] = tmp_points[:,0] + tocm
tmp_points[:,1] = tmp_points[:,1] - tocm
agentData.append(np.array(tmp_points))
agentData.append(np.array(points + (points - tmp_points)))
for agent in agentData:
# print agent
ax.plot(agent[:,0], agent[:,1], linewidth=2.0)
file = open('entropyData.txt', 'w')
writeEntropyMetricData(agentData, file)
file.close()
plt.show() | gpl-3.0 |
go-bears/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py | 73 | 7068 | import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart,ystop,ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
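
    A short usage sketch (x, y, z are hypothetical scattered data; the
    Triangulation class comes from this same delaunay package):

        tri = Triangulation(x, y)
        interp = LinearInterpolator(tri, z, default_value=0.0)
        grid = interp[0.0:1.0:100j, 0.0:1.0:100j]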
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
form a tesselation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
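
    A short usage sketch (mirroring LinearInterpolator; the data is
    hypothetical):

        tri = Triangulation(x, y)
        interp = NNInterpolator(tri, z)
        grid = interp[0.0:1.0:100j, 0.0:1.0:100j]
        vals = interp(xi, yi)  # NNInterpolator is also callable on points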
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
| agpl-3.0 |
sniemi/SamPy | cosmology/distances.py | 1 | 5054 | """
Functions related to cosmology, such as diameter and proper motion distances.
Plot some distance measures versus redshift and omega_M.
:requires: CosmoloPy
:requires: cosmocalc
:author: Sami-Matias Niemi
"""
import sys
import numpy
import matplotlib.pyplot as pylab
import matplotlib.cm as cm
import cosmolopy.distance as cd
#import cosmolopy.constants as cc
from cosmocalc import cosmocalc
def getDiameterDistances(data, redshift=0):
"""
Calculates a diameter distance in kpc / arc seconds
from data for all unique redshifts. The redshift
keyword indicates the column of redshifts.
:requires: cosmocalc
    :param data: input data array
    :param redshift: index of the column holding redshifts
:return: diameter distances
:rtype: dictionary
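
    A short sketch (the redshifts and the second column are made up):

       data = numpy.array([[0.5, 22.1], [1.0, 23.4], [0.5, 21.9]])
       scales = getDiameterDistances(data, redshift=0)
       # scales maps each unique z to kpc / arc seconds, e.g. {0.5: ..., 1.0: ...}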
"""
out = {}
for x in set(data[:, redshift]):
out[x] = cosmocalc(x, 71.0, 0.28)['PS_kpc'] #in kpc / arc seconds
return out
def plot_DM(filename):
"""
The dimensionless proper motion distance DM/DH.
"""
# Set up an array of redshift values.
dz = 0.1
z = numpy.arange(0., 10. + 1.1 * dz, dz)
# Set up a cosmology dictionary, with an array of matter density values.
cosmo = {}
dom = 0.01
om = numpy.atleast_2d(numpy.linspace(0.1, 1.0, (1. - 0.1) / dom)).transpose()
cosmo['omega_M_0'] = om
cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
cosmo['h'] = 0.701
cosmo['omega_k_0'] = 0.0
# Calculate the hubble distance.
dh = cd.hubble_distance_z(0, **cosmo)
# Calculate the comoving distance.
dm, dm_err = cd.comoving_distance_transverse(z, **cosmo)
# Make plots.
plot_dist(z, dz, om, dom, dm, dh, 'proper motion distance', r'D_M',
filename)
plot_dist_ony(z, dz, om, dom, dm, dh, 'proper motion distance', r'D_M',
filename)
def plot_DA(filename):
"""
The dimensionless angular diameter distance DA/DH.
"""
# Set up an array of redshift values.
dz = 0.1
z = numpy.arange(0., 10. + dz, dz)
# Set up a cosmology dictionary, with an array of matter density values.
cosmo = {}
dom = 0.01
om = numpy.atleast_2d(numpy.linspace(0.1, 1.0, (1. - 0.1) / dom)).transpose()
cosmo['omega_M_0'] = om
cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']
cosmo['h'] = 0.701
cosmo['omega_k_0'] = 0.0
# Calculate the hubble distance.
dh = cd.hubble_distance_z(0, **cosmo)
# Calculate the angular diameter distance.
da, da_err1, da_err2 = cd.angular_diameter_distance(z, **cosmo)
# Make plots.
plot_dist(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',
filename)
plot_dist_ony(z, dz, om, dom, da, dh, 'angular diameter distance', r'D_A',
filename)
def plot_dist(z, dz, om, dom, dist, dh, name, mathname, filename=None):
"""
Make a 2-D plot of a distance versus redshift (x) and matter density (y).
"""
# Grid of redshift and matter density values.
x, y = numpy.meshgrid(z, om)
pylab.figure()
pylab.imshow(dist / dh,
extent=(z.min() - dz / 2.,
z.max() + dz / 2.,
om.max() + dom / 2.,
om.min() - dom / 2.),
interpolation='nearest',
aspect=z.max() / om.max(),
cmap=cm.Spectral
)
cb = pylab.colorbar()
cb.ax.set_ylabel(r'$' + mathname + '/D_H$')
pylab.contour(x, y, dist / dh, 10, colors='k')
pylab.xlim(z.min(), z.max())
pylab.ylim(om.min(), om.max())
pylab.xlabel("Redshift z")
pylab.ylabel(r"$\Omega_M = 1 - \Omega_\lambda$")
pylab.title(name)
if filename is not None:
prefix, extension = filename.split('.')
pylab.savefig(prefix + '_' + mathname + '.' + extension,
bbox_inches="tight")
def plot_dist_ony(z, dz, om, dom, dist, dh, name, mathname, filename=None):
"""
Make a 2-D plot of matter density versus redshift (x) and distance (y)
"""
dist = dist / dh
z = z * numpy.ones(dist.shape)
om = om * numpy.ones(dist.shape)
# pylab.figure(figsize=(5.5,4.5))
pylab.figure()
pylab.contour(z, dist, om, 50)
cb = pylab.colorbar()
cb.ax.set_ylabel(r'$\Omega_M = 1 - \Omega_\lambda$')
pylab.xlim(z.min(), z.max())
pylab.ylim(dist.min(), dist.max())
pylab.xlabel("redshift z")
pylab.ylabel(name + r': $' + mathname + '/D_H$')
pylab.title(name)
if filename is not None:
prefix, extension = filename.split('.')
pylab.savefig(prefix + '_' + mathname + '_ony.' + extension,
bbox_inches="tight")
if __name__ == "__main__":
if len(sys.argv) == 1:
print "Run with a filename argument to produce image files, e.g.:"
print " python plot_2d_distances.py dist2d.png"
print " python plot_2d_distances.py dist2d.eps"
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = None
plot_DM(filename)
plot_DA(filename)
if filename is None:
pylab.show()
| bsd-2-clause |
huobaowangxi/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
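# Illustrative example (added, not part of the original module): for a 2x2x1
# grid the four voxels are numbered 0..3 in row-major order, and the returned
# 2 x n_edges array pairs each voxel with its neighbours along the y and x
# axes:
#
#     >>> _make_edges_3d(2, 2, 1)
#     array([[0, 2, 0, 1],
#            [1, 3, 2, 3]])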
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
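# Illustrative usage (added; a minimal sketch):
#
#     >>> import numpy as np
#     >>> img = np.arange(4).reshape(2, 2)
#     >>> graph = img_to_graph(img)        # sparse COO adjacency matrix
#     >>> graph.shape
#     (4, 4)
#     >>> graph.toarray()[0, 1]            # weight = |img[0, 0] - img[0, 1]|
#     1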
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
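# Illustrative example (added): with a 4x4 array, 2x2 patches and a unit
# extraction step, the strided view exposes 3x3 patch positions, each holding
# a 2x2 patch, without copying any data:
#
#     >>> import numpy as np
#     >>> arr = np.arange(16).reshape(4, 4)
#     >>> extract_patches(arr, patch_shape=2, extraction_step=1).shape
#     (3, 3, 2, 2)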
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
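# Illustrative round trip (added): extracting every overlapping patch and then
# averaging them back reconstructs the original image exactly:
#
#     >>> import numpy as np
#     >>> one_image = np.arange(16, dtype=float).reshape(4, 4)
#     >>> patches = extract_patches_2d(one_image, (2, 2))
#     >>> reconstructed = reconstruct_from_patches_2d(patches, (4, 4))
#     >>> np.allclose(reconstructed, one_image)
#     True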
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
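# Illustrative usage (added; a minimal sketch):
#
#     >>> import numpy as np
#     >>> X = np.arange(2 * 4 * 4).reshape(2, 4, 4)    # two 4x4 "images"
#     >>> extractor = PatchExtractor(patch_size=(2, 2))
#     >>> extractor.transform(X).shape                 # 9 patches per image
#     (18, 2, 2)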
| bsd-3-clause |
ishank08/scikit-learn | sklearn/ensemble/forest.py | 11 | 67127 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
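# Illustrative example (added): a bootstrap sample of size n_samples is drawn
# with replacement, so roughly a fraction 1/e (~37%) of the indices never
# appear; those are exactly the out-of-bag indices.  The two sets always
# partition the full index range:
#
#     >>> idx = _generate_sample_indices(random_state=0, n_samples=1000)
#     >>> oob = _generate_unsampled_indices(random_state=0, n_samples=1000)
#     >>> len(set(idx)) + len(oob)
#     1000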
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
            indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger than or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
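# Illustrative check (added; a minimal sketch using the concrete forest
# classes defined below): per-tree importances are normalized, so the
# averaged `feature_importances_` of a fitted forest sum to 1.
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]] * 5)
#     >>> y = np.array([0, 1, 0, 1] * 5)
#     >>> clf = RandomForestClassifier(n_estimators=5, random_state=0).fit(X, y)
#     >>> bool(np.isclose(clf.feature_importances_.sum(), 1.0))
#     True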
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
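# Illustrative usage (added; a minimal sketch):
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]] * 10)
#     >>> y = np.array([0, 0, 1, 1] * 10)           # XOR-style labels
#     >>> clf = RandomForestClassifier(n_estimators=25, random_state=0)
#     >>> clf.fit(X, y).predict([[0., 0.], [0., 1.]])
#     array([0, 1])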
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
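# Illustrative usage (added; a minimal sketch):
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.rand(100, 3)
#     >>> y = 2.0 * X[:, 0]                         # target depends on feature 0
#     >>> reg = RandomForestRegressor(n_estimators=20, random_state=0).fit(X, y)
#     >>> int(np.argmax(reg.feature_importances_))  # feature 0 dominates
#     0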
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
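# A minimal, illustrative usage sketch for the regressor above; the toy
# dataset from ``make_regression`` and its arguments are assumptions chosen
# only to keep the example self-contained.
#
#   >>> from sklearn.datasets import make_regression
#   >>> from sklearn.ensemble import ExtraTreesRegressor
#   >>> X, y = make_regression(n_samples=200, n_features=4, random_state=0)
#   >>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
#   >>> reg.fit(X, y).predict(X[:2]).shape
#   (2,)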
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
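# A minimal, illustrative usage sketch for the embedding above; the random toy
# data is an assumption for demonstration only. The transformed matrix has at
# most n_estimators * max_leaf_nodes columns, or n_estimators * 2 ** max_depth
# columns when max_leaf_nodes is None.
#
#   >>> import numpy as np
#   >>> from sklearn.ensemble import RandomTreesEmbedding
#   >>> X = np.random.RandomState(0).rand(50, 3)
#   >>> emb = RandomTreesEmbedding(n_estimators=5, max_depth=2, random_state=0)
#   >>> emb.fit_transform(X).shape[1] <= 5 * 2 ** 2
#   True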
| bsd-3-clause |
tobiasgehring/qudi | tools/Pulse_analysis_standalone.py | 4 | 8606 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 16:35:51 2015
This file contains a class for standalone analysis of fast counter data.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (C) 2015 Nikolas Tomek [email protected]
"""
import numpy as np
from scipy import ndimage
from matplotlib.pyplot import plot
class PulseAnalysis():
def __init__(self):
self.is_counter_gated = False
# std. deviation of the gaussian filter.
#Too small and the filtered data is too noisy to analyze; too big and the pulse edges are filtered out...
self.conv_std_dev = 5
# set windows for signal and normalization of the laser pulses
self.signal_start_bin = 5
self.signal_width_bins = 200
self.norm_start_bin = 500
self.norm_width_bins = 200
# total number of laser pulses in the sequence
self.number_of_lasers = 50
# data arrays
self.tau_vector = np.array(range(50)) # tau values (x-axis)
self.signal_vector = np.zeros(self.number_of_lasers, dtype=float) # data points (y-axis)
self.laser_data = None # extracted laser pulses
def _gated_extraction(self, count_data):
""" This method detects the rising flank in the gated timetrace data and extracts just the laser pulses
@param 2D numpy.ndarray count_data: the raw timetrace data from a gated fast counter (dimensions 0: gate number, 1: time bin)
@return 2D numpy.ndarray: The extracted laser pulses of the timetrace (dimensions 0: laser number, 1: time bin)
"""
# sum up all gated timetraces to ease flank detection
timetrace_sum = np.sum(count_data, 0)
# apply gaussian filter to remove noise and compute the gradient of the timetrace sum
conv_deriv = self._convolve_derive(timetrace_sum, self.conv_std_dev)
# get indices of rising and falling flank
rising_ind = conv_deriv.argmax()
falling_ind = conv_deriv.argmin()
# slice the data array to cut off anything but laser pulses
laser_arr = count_data[:, rising_ind:falling_ind]
return laser_arr
def _ungated_extraction(self, count_data, num_of_lasers):
""" This method detects the laser pulses in the ungated timetrace data and extracts them
@param 1D numpy.ndarray count_data: the raw timetrace data from an ungated fast counter
@param int num_of_lasers: The total number of laser pulses inside the pulse sequence
@return 2D numpy.ndarray: The extracted laser pulses of the timetrace (dimensions 0: laser number, 1: time bin)
"""
# apply gaussian filter to remove noise and compute the gradient of the timetrace
conv_deriv = self._convolve_derive(count_data, self.conv_std_dev)
# initialize arrays to contain indices for all rising and falling flanks, respectively
rising_ind = np.empty([num_of_lasers],int)
falling_ind = np.empty([num_of_lasers],int)
# Find as many rising and falling flanks as there are laser pulses in the timetrace
for i in range(num_of_lasers):
# save the index of the absolute maximum of the derived timetrace as rising flank position
rising_ind[i] = np.argmax(conv_deriv)
            # set this position and the surrounding of the saved flank to 0 to avoid a second detection
if rising_ind[i] < 2*self.conv_std_dev:
del_ind_start = 0
else:
del_ind_start = rising_ind[i] - 2*self.conv_std_dev
if (conv_deriv.size - rising_ind[i]) < 2*self.conv_std_dev:
del_ind_stop = conv_deriv.size-1
else:
del_ind_stop = rising_ind[i] + 2*self.conv_std_dev
conv_deriv[del_ind_start:del_ind_stop] = 0
# save the index of the absolute minimum of the derived timetrace as falling flank position
falling_ind[i] = np.argmin(conv_deriv)
            # set this position and the surrounding of the saved flank to 0 to avoid a second detection
if falling_ind[i] < 2*self.conv_std_dev:
del_ind_start = 0
else:
del_ind_start = falling_ind[i] - 2*self.conv_std_dev
if (conv_deriv.size - falling_ind[i]) < 2*self.conv_std_dev:
del_ind_stop = conv_deriv.size-1
else:
del_ind_stop = falling_ind[i] + 2*self.conv_std_dev
conv_deriv[del_ind_start:del_ind_stop] = 0
# sort all indices of rising and falling flanks
rising_ind.sort()
falling_ind.sort()
# find the maximum laser length to use as size for the laser array
laser_length = np.max(falling_ind-rising_ind)
# initialize the empty output array
laser_arr = np.zeros([num_of_lasers, laser_length],int)
# slice the detected laser pulses of the timetrace and save them in the output array
for i in range(num_of_lasers):
if (rising_ind[i]+laser_length > count_data.size):
lenarr = count_data[rising_ind[i]:].size
laser_arr[i, 0:lenarr] = count_data[rising_ind[i]:]
else:
laser_arr[i] = count_data[rising_ind[i]:rising_ind[i]+laser_length]
return laser_arr
def _convolve_derive(self, data, std_dev):
""" This method smoothes the input data by applying a gaussian filter (convolution) with
specified standard deviation. The derivative of the smoothed data is computed afterwards and returned.
If the input data is some kind of rectangular signal containing high frequency noise,
the output data will show sharp peaks corresponding to the rising and falling flanks of the input signal.
        @param 1D numpy.ndarray data: the raw data to be smoothed and derived
@param float std_dev: standard deviation of the gaussian filter to be applied for smoothing
@return 1D numpy.ndarray: The smoothed and derived data
"""
conv = ndimage.filters.gaussian_filter1d(data, std_dev)
conv_deriv = np.gradient(conv)
return conv_deriv
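    # Illustrative sketch of the flank-detection idea used above; the synthetic
    # rectangular pulse below is an assumption for demonstration only. The
    # smoothed derivative peaks at the rising flank and dips at the falling one.
    #
    #   >>> import numpy as np
    #   >>> tool = PulseAnalysis()
    #   >>> trace = np.zeros(300); trace[100:200] = 100.
    #   >>> deriv = tool._convolve_derive(trace, std_dev=5)
    #   >>> 95 < int(np.argmax(deriv)) < 105 and 195 < int(np.argmin(deriv)) < 205
    #   True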
def analyze_data(self, raw_data):
""" This method captures the fast counter data and extracts the laser pulses.
@param int num_of_lasers: The total number of laser pulses inside the pulse sequence
@return 2D numpy.ndarray: The extracted laser pulses of the timetrace (dimensions 0: laser number, 1: time bin)
@return 1D/2D numpy.ndarray: The raw timetrace from the fast counter
"""
# call appropriate laser extraction method depending on if the fast counter is gated or not.
if self.is_counter_gated:
self.laser_data = self._gated_extraction(raw_data)
else:
self.laser_data = self._ungated_extraction(raw_data, self.number_of_lasers)
#analyze data
norm_mean = np.zeros(self.number_of_lasers, dtype=float)
signal_mean = np.zeros(self.number_of_lasers, dtype=float)
# set start and stop indices for the analysis
norm_start = self.norm_start_bin
norm_end = self.norm_start_bin + self.norm_width_bins
signal_start = self.signal_start_bin
signal_end = self.signal_start_bin + self.signal_width_bins
# loop over all laser pulses and analyze them
for i in range(self.number_of_lasers):
# calculate the mean of the data in the normalization window
norm_mean[i] = self.laser_data[i][norm_start:norm_end].mean()
# calculate the mean of the data in the signal window
signal_mean[i] = (self.laser_data[i][signal_start:signal_end] - norm_mean[i]).mean()
# update the signal plot y-data
self.signal_vector[i] = 1. + (signal_mean[i]/norm_mean[i])
return
if __name__ == "__main__":
tool = PulseAnalysis()
data = np.loadtxt('FastComTec_demo_timetrace.asc')
tool.analyze_data(data)
plot(tool.tau_vector, tool.signal_vector) | gpl-3.0 |
mjirik/imtools | tests/train_test.py | 1 | 6177 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import unittest
import numpy as np
import os
import os.path
import pytest
import os.path as op
import sys
import matplotlib.pyplot as plt
import glob
import itertools
import io3d
import sed3
import imtools.trainer3d
import imtools.datasets
import sklearn
import sklearn.metrics
import sklearn.neural_network
from sklearn.svm import SVC
from loguru import logger
sys.path.append(op.expanduser("~/projects/bodynavigation"))
# import bodynavigation
pt = op.expanduser("~/projects/imtools")
sys.path.append(pt)
import imtools
# @pytest.mark.interactive
def test_intensity_training_ircad():
#nth - use every nth pixel
nth = 10
# TODO use ircad
sliver_reference_dir = io3d.datasets.joinp("~/data/medical/orig/sliver07/training/", return_as_str=True)
# sliver_reference_dir = op.expanduser("~/data/medical/orig/sliver07/training/")
# Train
ol = imtools.trainer3d.Trainer3D()
# ol.feature_function = localization_fv
# for one in imtools.datasets.sliver_reader("*[0].mhd", read_seg=True):
for i in range(1, 2):
datap = io3d.read_dataset('3Dircadb1', "data3d", i)
datap_liver = io3d.read_dataset('3Dircadb1', "liver", i)
ol.add_train_data(datap["data3d"], (datap_liver["data3d"] > 0).astype(np.uint8), voxelsize_mm=datap["voxelsize_mm"])
# numeric_label, vs_mm, oname, orig_data, rname, ref_data = one
# ol.add_train_data(orig_data, ref_data, voxelsize_mm=vs_mm)
ol.fit()
# Testing
i = 1
datap = io3d.datasets.read_dataset("3Dircadb1", 'data3d', i)
datap_liver = io3d.datasets.read_dataset("3Dircadb1", 'liver', i)
data3d = datap["data3d"]
segmentation = (datap_liver["data3d"] > 0).astype(np.uint8)
fit = ol.predict(data3d, voxelsize_mm=datap["voxelsize_mm"])
# one = list(imtools.datasets.sliver_reader("*018.mhd", read_seg=True))[0]
# numeric_label, vs_mm, oname, orig_data, rname, ref_data = one
# fit = ol.predict(orig_data, voxelsize_mm=vs_mm)
err = segmentation != (fit > 0).astype(np.uint8)
# visualization
# plt.figure(figsize=(15, 10))
# sed3.show_slices(datap["data3d"], fit, slice_step=20, axis=1, flipV=True)
accuracy = np.sum(~err) / np.prod(data3d.shape)
assert accuracy >= 0.80
# assert
def _mk_data(slice3, offset=1, shape=[10, 11, 12]):
data3d = np.random.random(shape)
data3d[slice3] += offset
segmentation = np.zeros(shape, dtype=int)
segmentation[slice3] = 1
return data3d, segmentation
# TODO finish product
_gmm__mix_clf = imtools.ml.gmmcl.GMMCl()
_gmm__mix_clf.cl = {0:sklearn.mixture.GaussianMixture(n_components=1), 1:sklearn.mixture.GaussianMixture(n_components=3)}
@pytest.mark.parametrize('cl,shape', itertools.product(
[
# sklearn.tree.DecisionTreeClassifier(),
# _gmm__mix_clf,
imtools.ml.gmmcl.GMMCl(),
# sklearn.neural_network.MLPClassifier(),
SVC(kernel='linear', class_weight='balanced', probability=True),
# SVC()
],
[
# [10, 11, 12],
[30, 31, 32],
]
))
def test_intensity_training_artificial(cl, shape):
"""
Test different classifiers on unbalanced dataset.
:param cl:
:param shape:
:return:
"""
# scl = str(cl)
# logger.debug(f'cl={scl[:min(30, len(scl))]}')
logger.debug(f'cl={cl}')
logger.debug(f'shape={shape}')
slice3 = (slice(3, 7), slice(3, 7), slice(3, 7))
# shape = [30,31,32]
voxelsize_mm = [1, 2, 3]
d3d, seg = _mk_data(slice3, shape=shape, offset=0.7)
un, counts = np.unique(seg.flatten(), return_counts=True)
logger.debug(f'counts={counts}')
ol = imtools.trainer3d.Trainer3D(classifier=cl)
ol.working_voxelsize_mm=[2,2,2]
# ol.cl = tree.DecisionTreeClassifier()
# ol.cl = cl
ol.add_train_data(d3d, seg, voxelsize_mm=voxelsize_mm, nth=None) # We take all samples
# https://elitedatascience.com/imbalanced-classes
un, counts = np.unique(ol.target, return_counts=True)
n_samples = np.min(counts)
new_data_list = []
new_target_list = []
for label in un:
all_data_for_one_label = ol.data[ol.target.astype(np.uint8).flatten() == label]
        # TODO maybe use the function sklearn.utils.resample
# https://scikit-learn.org/stable/modules/generated/sklearn.utils.resample.html
resamples = sklearn.utils.resample(all_data_for_one_label, n_samples=n_samples, replace=True)
# data_subset = all_data_for_one_label[:n_samples] # pick first n samples
# new_data_list.append(data_subset)
new_data_list.append(resamples)
new_target_list.append(np.ones([n_samples], dtype=type(label)) * label)
original_data = ol.data
original_target = ol.target
new_data = np.concatenate(new_data_list, axis=0)
new_target = np.concatenate(new_target_list, axis=0)
ol.data = new_data
ol.target = new_target
ol.fit()
# test
# slice3 = (slice(2, 6), slice(2, 8), slice(2, 7))
# shape = [12, 11, 10]
# voxelsize_mm = [1, 2, 3]
d3d, seg = _mk_data(slice3, shape=shape, offset=0.7)
pred_seg = ol.predict(d3d, voxelsize_mm)
sed3.show_slices(d3d, contour=seg, slice_number=8)
# ed = sed3.sed3(d3d, contour=seg)
# ed.show()
sc = sklearn.metrics.accuracy_score(seg.flatten(), pred_seg.flatten())
f1 = sklearn.metrics.f1_score(seg.flatten(), pred_seg.flatten())
logger.debug(f"f1={f1}")
assert sc > 0.5
assert f1 > 0.5
def test_resample():
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
X = np.random.rand(y.shape[0]) + y
X = X.reshape([-1,1])
# X = np.array([[1., 0.], [2., 1.], [0., 0.]])
# y = np.array([0, 1, 2])
# from scipy.sparse import coo_matrix
# X_sparse = coo_matrix(X)
from sklearn.utils import shuffle, resample
# X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
# X
#
# X_sparse
#
# X_sparse.toarray()
y
# shuffle(y, n_samples=2, random_state=0)
Xr = resample(X, n_samples=2, random_state=0)
print(Xr)
def balance_dataset(X,y):
    # ask np.unique for counts explicitly so the two-value unpacking works
    labels, counts = np.unique(y, return_counts=True)
| mit |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/StimulusRoutines.py | 1 | 176902 | """
Contains various stimulus routines
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import random
import time
import h5py
from tools import ImageAnalysis as ia
from tools import FileTools as ft
try:
import skimage.external.tifffile as tf
except ImportError:
import tifffile as tf
def in_hull(p, hull):
"""
Determine if points in `p` are in `hull`
`p` should be a `NxK` coordinate matrix of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
Parameters
----------
p : array
NxK coordinate matrix of N points in K dimensions
hull :
either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay
triangulation will be computed
Returns
-------
is_in_hull : ndarray of int
Indices of simplices containing each point. Points outside the
triangulation get the value -1.
"""
from scipy.spatial import Delaunay
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p) >= 0
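# Illustrative sketch of in_hull(); the unit-square coordinates below are
# assumptions used only for demonstration.
#
#   >>> import numpy as np
#   >>> square = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
#   >>> in_hull(np.array([[0.5, 0.5], [2.0, 2.0]]), square).tolist()
#   [True, False]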
def get_warped_probes(deg_coord_alt, deg_coord_azi, probes, width,
height, ori=0., background_color=0.):
"""
    Generate a frame (matrix) with multiple probes defined by `probes`, `width`,
    `height` and orientation in degrees. The visual degree coordinate of each
    pixel is defined by `deg_coord_azi` and `deg_coord_alt`.
Parameters
----------
deg_coord_alt : ndarray
2d array of warped altitude coordinates of monitor pixels
deg_coord_azi : ndarray
2d array of warped azimuth coordinates of monitor pixels
probes : tuple or list
each element of probes represents a single probe (center_alt, center_azi, sign)
width : float
width of the square in degrees
height : float
height of the square in degrees
ori : float
angle in degree, should be [0., 180.]
    background_color : float, optional
        color of the background behind the probes, takes values in
        [-1,1] and defaults to `0.`. The color of each probe is given by the
        third element (sign) of that probe rather than by a separate parameter.
Returns
-------
frame : ndarray
        the warped frame with all probes drawn at their warped (pixel) positions
"""
frame = np.ones(deg_coord_azi.shape, dtype=np.float32) * background_color
# if ori < 0. or ori > 180.:
# raise ValueError, 'ori should be between 0 and 180.'
ori_arc = (ori % 360.) * 2 * np.pi / 360.
for probe in probes:
dis_width = np.abs(np.cos(ori_arc) * (deg_coord_azi - probe[1]) +
np.sin(ori_arc) * (deg_coord_alt - probe[0]))
dis_height = np.abs(np.cos(ori_arc + np.pi / 2) * (deg_coord_azi - probe[1]) +
np.sin(ori_arc + np.pi / 2) * (deg_coord_alt - probe[0]))
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
# fig1 = ax1.imshow(dis_width)
# ax1.set_title('width')
# f.colorbar(fig1, ax=ax1)
# fig2 = ax2.imshow(dis_height)
# ax2.set_title('height')
# f.colorbar(fig2, ax=ax2)
# plt.show()
frame[np.logical_and(dis_width <= width / 2.,
dis_height <= height / 2.)] = probe[2]
return frame
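# Illustrative sketch of get_warped_probes(); the flat 1-degree coordinate grid
# below is an assumption standing in for the monitor's warped coordinate maps.
#
#   >>> import numpy as np
#   >>> alt, azi = np.meshgrid(np.arange(-10., 11.), np.arange(-10., 11.),
#   ...                        indexing='ij')
#   >>> frame = get_warped_probes(deg_coord_alt=alt, deg_coord_azi=azi,
#   ...                           probes=((0., 0., 1.),), width=4., height=4.)
#   >>> float(frame.max()) == 1.0 and float(frame.min()) == 0.0
#   True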
def blur_cos(dis, sigma):
"""
    return a smoothed value in [0., 1.] given the signed distance to the band
    center and the smoothing width. A cosine curve is used to smooth the edge.
    Parameters
    ----------
    dis : ndarray
        array storing the distance from each pixel to the center of the blurred band
    sigma : float
        width of the blurred band, i.e. the length representing half a cycle
        of the cosine function
    Returns
    -------
    blurred : ndarray
        the blurred values, same shape as `dis`
"""
dis_f = dis.astype(np.float32)
sigma_f = abs(float(sigma))
blur_band = (np.cos((dis_f - (sigma_f / -2.)) * np.pi / sigma_f) + 1.) / 2.
# plt.imshow(blur_band)
# plt.show()
blur_band[dis_f < (sigma_f / -2.)] = 1.
blur_band[dis_f > (sigma_f / 2.)] = 0.
# print blur_band.dtype
return blur_band
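# Illustrative sketch of blur_cos(); the sample distances are assumptions for
# demonstration. The ramp is 1 well inside the edge, 0.5 at the edge (dis == 0)
# and 0 well outside.
#
#   >>> import numpy as np
#   >>> np.allclose(blur_cos(np.array([-5., 0., 5.]), sigma=2.), [1., 0.5, 0.])
#   True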
def get_circle_mask(map_alt, map_azi, center, radius, is_smooth_edge=False,
blur_ratio=0.2, blur_func=blur_cos, is_plot=False):
"""
Generate a binary mask of a circle with given `center` and `radius`
The binary mask is generated on a map with coordinates for each pixel
defined by `map_x` and `map_y`
Parameters
----------
map_alt : ndarray
altitude coordinates for each pixel on a map
map_azi : ndarray
azimuth coordinates for each pixel on a map
center : tuple
coordinates (altitude, azimuth) of the center of the binary circle mask
radius : float
radius of the binary circle mask
is_smooth_edge : bool
if True, use 'blur_ratio' and 'blur_func' to smooth circle edge
    blur_ratio : float, optional, default 0.2
        the ratio of the blurred band width to the radius, should be smaller than 1;
        the middle of the blurred band is the circle edge
blur_func : function object to blur edge
is_plot : bool
Returns
-------
circle_mask : ndarray (dtype np.float32) with same shape as map_alt and map_azi
if is_smooth_edge is True
weighted circle mask, with smoothed edge
if is_smooth_edge is False
binary circle mask, takes values in [0.,1.]
"""
if map_alt.shape != map_azi.shape:
raise ValueError('map_alt and map_azi should have same shape.')
if len(map_alt.shape) != 2:
raise ValueError('map_alt and map_azi should be 2-d.')
dis_mat = np.sqrt((map_alt - center[0]) ** 2 + (map_azi - center[1]) ** 2)
# plt.imshow(dis_mat)
# plt.show()
if is_smooth_edge:
sigma = radius * blur_ratio
circle_mask = blur_func(dis=dis_mat - radius, sigma=sigma)
else:
circle_mask = np.zeros(map_alt.shape, dtype=np.float32)
circle_mask[dis_mat <= radius] = 1.
if is_plot:
plt.imshow(circle_mask)
plt.show()
return circle_mask
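# Illustrative sketch of get_circle_mask(); the flat 1-degree coordinate grid
# is an assumption standing in for the monitor's warped coordinate maps.
#
#   >>> import numpy as np
#   >>> alt, azi = np.meshgrid(np.arange(-20., 21.), np.arange(-20., 21.),
#   ...                        indexing='ij')
#   >>> mask = get_circle_mask(alt, azi, center=(0., 0.), radius=5.)
#   >>> float(mask[20, 20]) == 1.0 and float(mask[0, 0]) == 0.0
#   True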
def get_grating(alt_map, azi_map, dire=0., spatial_freq=0.1,
center=(0., 60.), phase=0., contrast=1.):
"""
Generate a grating frame with defined spatial frequency, center location,
phase and contrast
Parameters
----------
azi_map : ndarray
x coordinates for each pixel on a map
alt_map : ndarray
y coordinates for each pixel on a map
dire : float, optional
orientation angle of the grating in degrees, defaults to 0.
spatial_freq : float, optional
spatial frequency (cycle per unit), defaults to 0.1
center : tuple, optional
center coordinates of circle {alt, azi}
phase : float, optional
defaults to 0.
contrast : float, optional
defines contrast. takes values in [0., 1.], defaults to 1.
Returns
-------
    frame : ndarray
        a 2-d floating point array containing the grating, value range [0., 1.]
"""
if azi_map.shape != alt_map.shape:
raise ValueError('map_alt and map_azi should have same shape.')
if len(azi_map.shape) != 2:
raise ValueError('map_alt and map_azi should be 2-d.')
axis_arc = ((dire + 90.) * np.pi / 180.) % (2 * np.pi)
map_azi_h = np.array(azi_map, dtype=np.float32)
map_alt_h = np.array(alt_map, dtype=np.float32)
distance = (np.sin(axis_arc) * (map_azi_h - center[1]) -
np.cos(axis_arc) * (map_alt_h - center[0]))
grating = np.sin(distance * 2 * np.pi * spatial_freq - phase)
grating = grating * contrast # adjust contrast
grating = (grating + 1.) / 2. # change the scale of grating to be [0., 1.]
return grating
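# Illustrative sketch of get_grating(); the flat coordinate grid and parameter
# values are assumptions for demonstration. With contrast 0.5 the output stays
# well inside the nominal [0., 1.] range.
#
#   >>> import numpy as np
#   >>> alt, azi = np.meshgrid(np.arange(-20., 21.), np.arange(40., 81.),
#   ...                        indexing='ij')
#   >>> grating = get_grating(alt, azi, dire=45., spatial_freq=0.05,
#   ...                       center=(0., 60.), contrast=0.5)
#   >>> bool(0. <= grating.min() and grating.max() <= 1.)
#   True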
# def get_sparse_loc_num_per_frame(min_alt, max_alt, min_azi, max_azi, minimum_dis):
# """
# given the subregion of visual space and the minmum distance between the probes
# within a frame (definition of sparseness), return generously how many probes
# will be presented of a given frame
#
# Parameters
# ----------
# min_alt : float
# minimum altitude of display region, in visual degrees
# max_alt : float
# maximum altitude of display region, in visual degrees
# min_azi : float
# minimum azimuth of display region, in visual degrees
# max_azi : float
# maximum azimuth of display region, in visual degrees
# minimum_dis : float
# minimum distance allowed among probes within a frame
#
# returns
# -------
# probe_num_per_frame : uint
# generously how many probes will be presented in a given frame
# """
# if min_alt >= max_alt:
# raise ValueError('min_alt should be less than max_alt.')
#
# if min_azi >= max_azi:
# raise ValueError('min_azi should be less than max_azi.')
#
# min_alt = float(min_alt)
# max_alt = float(max_alt)
# min_azi = float(min_azi)
# max_azi = float(max_azi)
#
# area_tot = (max_alt - min_alt) * (max_azi - min_azi)
# area_circle = np.pi * (minimum_dis ** 2)
# probe_num_per_frame = int(np.ceil((2.0 * (area_tot / area_circle))))
# return probe_num_per_frame
def get_grid_locations(subregion, grid_space, monitor_azi, monitor_alt, is_include_edge=True,
is_plot=False):
"""
generate all the grid points in display area (covered by both subregion and
monitor span), designed for SparseNoise and LocallySparseNoise stimuli.
Parameters
----------
subregion : list, tuple or np.array
the region on the monitor that will display the sparse noise,
[min_alt, max_alt, min_azi, max_azi], all floats
grid_space : tuple or list of two floats
grid size of probes to be displayed, [altitude, azimuth]
monitor_azi : 2-d array
array mapping monitor pixels to azimuth in visual space
monitor_alt : 2-d array
array mapping monitor pixels to altitude in visual space
is_include_edge : bool, default True,
if True, the displayed probes will cover the edge case and ensure that
the entire subregion is covered.
If False, the displayed probes will exclude edge case and ensure that all
the centers of displayed probes are within the subregion.
is_plot : bool
Returns
-------
grid_locations : n x 2 array,
refined [alt, azi] pairs of probe centers going to be displayed
"""
rows = np.arange(subregion[0],
subregion[1] + grid_space[0],
grid_space[0])
columns = np.arange(subregion[2],
subregion[3] + grid_space[1],
grid_space[1])
azis, alts = np.meshgrid(columns, rows)
grid_locations = np.transpose(np.array([alts.flatten(), azis.flatten()]))
left_alt = monitor_alt[:, 0]
right_alt = monitor_alt[:, -1]
top_azi = monitor_azi[0, :]
bottom_azi = monitor_azi[-1, :]
left_azi = monitor_azi[:, 0]
right_azi = monitor_azi[:, -1]
top_alt = monitor_alt[0, :]
bottom_alt = monitor_alt[-1, :]
left_azi_e = left_azi - grid_space[1]
right_azi_e = right_azi + grid_space[1]
top_alt_e = top_alt + grid_space[0]
bottom_alt_e = bottom_alt - grid_space[0]
all_alt = np.concatenate((left_alt, right_alt, top_alt, bottom_alt))
all_azi = np.concatenate((left_azi, right_azi, top_azi, bottom_azi))
all_alt_e = np.concatenate((left_alt, right_alt, top_alt_e, bottom_alt_e))
all_azi_e = np.concatenate((left_azi_e, right_azi_e, top_azi, bottom_azi))
monitorPoints = np.array([all_alt, all_azi]).transpose()
monitorPoints_e = np.array([all_alt_e, all_azi_e]).transpose()
# get the grid points within the coverage of monitor
if is_include_edge:
grid_locations = grid_locations[in_hull(grid_locations, monitorPoints_e)]
else:
grid_locations = grid_locations[in_hull(grid_locations, monitorPoints)]
# grid_locations = np.array([grid_locations[:, 1], grid_locations[:, 0]]).transpose()
if is_plot:
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(monitorPoints[:, 1], monitorPoints[:, 0], '.r', label='monitor')
ax.plot(monitorPoints_e[:, 1], monitorPoints_e[:, 0], '.g', label='monitor_e')
ax.plot(grid_locations[:, 1], grid_locations[:, 0], '.b', label='grid')
ax.legend()
plt.show()
return grid_locations
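# Illustrative sketch of get_grid_locations(); the flat monitor coordinate maps
# and the subregion below are assumptions for demonstration (in the package the
# maps come from the monitor object's degree coordinate arrays).
#
#   >>> import numpy as np
#   >>> m_alt, m_azi = np.meshgrid(np.arange(-30., 31.), np.arange(0., 91.),
#   ...                            indexing='ij')
#   >>> locs = get_grid_locations(subregion=[-10., 10., 30., 60.],
#   ...                           grid_space=(5., 5.),
#   ...                           monitor_azi=m_azi, monitor_alt=m_alt)
#   >>> locs.shape[1], bool(len(locs) > 0)
#   (2, True)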
class Stim(object):
"""
generic class for visual stimulation. parent class for individual
stimulus routines.
Parameters
----------
monitor : monitor object
the monitor used to display stimulus in the experiment
indicator : indicator object
the indicator used during stimulus
background : float, optional
background color of the monitor screen when stimulus is not being
presented, takes values in [-1,1] and defaults to `0.` (grey)
coordinate : str {'degree', 'linear'}, optional
determines the representation of pixel coordinates on monitor,
defaults to 'degree'
pregap_dur : float, optional
duration of gap period before stimulus, measured in seconds, defaults
to `2.`
postgap_dur : float, optional
duration of gap period after stimulus, measured in seconds, defaults
to `3.`
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
pregap_dur=2., postgap_dur=3.):
"""
Initialize visual stimulus object
"""
self.monitor = monitor
self.indicator = indicator
if background < -1. or background > 1.:
raise ValueError('parameter "background" should be a float within [-1., 1.].')
else:
self.background = float(background)
if coordinate not in ['degree', 'linear']:
raise ValueError('parameter "coordinate" should be either "degree" or "linear".')
else:
self.coordinate = coordinate
if pregap_dur >= 0.:
self.pregap_dur = float(pregap_dur)
else:
raise ValueError('pregap_dur should be no less than 0.')
if postgap_dur >= 0.:
self.postgap_dur = float(postgap_dur)
else:
raise ValueError('postgap_dur should be no less than 0.')
self.clear()
@property
def pregap_frame_num(self):
return int(self.pregap_dur * self.monitor.refresh_rate)
@property
def postgap_frame_num(self):
return int(self.postgap_dur * self.monitor.refresh_rate)
def generate_frames(self):
"""
place holder of function "generate_frames" for each specific stimulus
"""
print('Nothing executed! This is a place holder function. \n'
'See documentation in the respective stimulus.')
def generate_movie(self):
"""
place holder of function 'generate_movie' for each specific stimulus
"""
print('Nothing executed! This is a place holder function. '
'See documentation in the respective stimulus. \n'
              'It is possible that full sequence generation is not '
'implemented in this particular stimulus. Try '
'generate_movie_by_index() function to see if indexed '
'sequence generation is implemented.')
def _generate_frames_for_index_display(self):
"""
place holder of function _generate_frames_for_index_display()
for each specific stimulus
"""
print('Nothing executed! This is a place holder function. \n'
'See documentation in the respective stimulus.')
def _generate_display_index(self):
"""
place holder of function _generate_display_index()
for each specific stimulus
"""
print('Nothing executed! This is a place holder function. \n'
'See documentation in the respective stimulus.')
def generate_movie_by_index(self):
"""
place holder of function generate_movie_by_index()
for each specific stimulus
"""
print('Nothing executed! This is a place holder function. \n'
'See documentation in the respective stimulus.')
def clear(self):
if hasattr(self, 'frames'):
del self.frames
if hasattr(self, 'frames_unique'):
del self.frames_unique
if hasattr(self, 'index_to_display'):
del self.index_to_display
# for StaticImages
if hasattr(self, 'images_wrapped'):
del self.images_wrapped
if hasattr(self, 'images_dewrapped'):
del self.images_dewrapped
if hasattr(self, 'altitude_wrapped'):
del self.altitude_wrapped
if hasattr(self, 'azimuth_wrapped'):
del self.azimuth_wrapped
if hasattr(self, 'altitude_dewrapped'):
del self.altitude_dewrapped
if hasattr(self, 'azimuth_dewrapped'):
del self.azimuth_dewrapped
def set_monitor(self, monitor):
self.monitor = monitor
self.clear()
def set_indicator(self, indicator):
self.indicator = indicator
self.clear()
def set_pregap_dur(self, pregap_dur):
if pregap_dur >= 0.:
self.pregap_dur = float(pregap_dur)
else:
raise ValueError('pregap_dur should be no less than 0.')
self.clear()
def set_postgap_dur(self, postgap_dur):
if postgap_dur >= 0.:
self.postgap_dur = float(postgap_dur)
else:
raise ValueError('postgap_dur should be no less than 0.')
def set_background(self, background):
if background < -1. or background > 1.:
raise ValueError('parameter "background" should be a float within [-1., 1.].')
else:
self.background = float(background)
self.clear()
def set_coordinate(self, coordinate):
if coordinate not in ['degree', 'linear']:
raise ValueError('parameter "coordinate" should be either "degree" or "linear".')
self.coordinate = coordinate
class UniformContrast(Stim):
"""
Generate full field uniform luminance for recording spontaneous activity.
Inherits from Stim.
The full field uniform luminance stimulus presents a fixed background color
which is normally taken to be grey.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
duration: float
number of seconds of the duration of the uniform luminance.
color : float, optional
the choice of color to display in the stimulus, defaults to `0.` which
is grey
"""
def __init__(self, monitor, indicator, duration, color=0., pregap_dur=2.,
postgap_dur=3., background=0., coordinate='degree'):
"""
Initialize UniformContrast object
"""
super(UniformContrast, self).__init__(monitor=monitor,
indicator=indicator,
coordinate=coordinate,
background=background,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'UniformContrast'
self.duration = duration
self.color = float(color)
self.frame_config = ('is_display', 'indicator color [-1., 1.]')
def generate_frames(self):
"""
generate a tuple of parameters with information for each frame.
Information contained in each frame:
first element -
during display frames, value takes on 1 and value
is 0 otherwise
second element - color of indicator
during display value is equal to 1 and during gaps value is
equal to -1
"""
displayframe_num = int(self.duration * self.monitor.refresh_rate)
frames = [(0, -1.)] * self.pregap_frame_num + \
[(1, 1.)] * displayframe_num + \
[(0, -1.)] * self.postgap_frame_num
return tuple(frames)
def _generate_frames_for_index_display(self):
""" parameters are predefined here, nothing to compute. """
if self.indicator.is_sync:
# Parameters that define the stimulus
frames = ((0, -1.), (1, 1.))
return frames
else:
raise NotImplementedError("method not avaialable for non-sync indicator.")
def _generate_display_index(self):
""" compute a list of indices corresponding to each frame to display. """
displayframe_num = int(self.duration * self.monitor.refresh_rate)
index_to_display = [0] * self.pregap_frame_num + [1] * displayframe_num + \
[0] * self.postgap_frame_num
return index_to_display
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
self.frames_unique = self._generate_frames_for_index_display()
self.index_to_display = self._generate_display_index()
num_frames = len(self.frames_unique)
num_pixels_width = self.monitor.deg_coord_x.shape[0]
num_pixels_height = self.monitor.deg_coord_x.shape[1]
# Initialize numpy array of 0's as placeholder for stimulus routine
full_sequence = np.ones((num_frames,
num_pixels_width,
num_pixels_height),
dtype=np.float32) * self.background
# Compute pixel coordinates for indicator
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
display = self.color * np.ones((num_pixels_width,num_pixels_height),
dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1:
full_sequence[i] = display
# Insert indicator pixels
full_sequence[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[1]
monitor_dict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
NF_dict = dict(self.__dict__)
NF_dict.pop('monitor')
NF_dict.pop('indicator')
full_dict = {'stimulation': NF_dict,
'monitor': monitor_dict,
'indicator': indicator_dict}
return full_sequence, full_dict
def generate_movie(self):
"""
generate movie for uniform contrast display frame by frame.
Returns
-------
full_seq : nd array, uint8
3-d array of the stimulus to be displayed.
full_dict : dict
dictionary containing the information of the stimulus.
"""
self.frames = self.generate_frames()
full_seq = np.zeros((len(self.frames),
self.monitor.deg_coord_x.shape[0],
self.monitor.deg_coord_x.shape[1]),
dtype=np.float32)
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
background = np.ones((np.size(self.monitor.deg_coord_x, 0),
np.size(self.monitor.deg_coord_x, 1)),
dtype=np.float32) * self.background
display = np.ones((np.size(self.monitor.deg_coord_x, 0),
np.size(self.monitor.deg_coord_x, 1)),
dtype=np.float32) * self.color
if not (self.coordinate == 'degree' or self.coordinate == 'linear'):
            raise LookupError("`coordinate` value not in {'degree','linear'}")
for i in range(len(self.frames)):
curr_frame = self.frames[i]
if curr_frame[0] == 0:
curr_FC_seq = background
else:
curr_FC_seq = display
curr_FC_seq[indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = curr_frame[1]
full_seq[i] = curr_FC_seq
if i in range(0, len(self.frames), len(self.frames) / 10):
print('Generating numpy sequence: ' +
str(int(100 * (i + 1) / len(self.frames))) + '%')
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
NFdict = dict(self.__dict__)
NFdict.pop('monitor')
NFdict.pop('indicator')
full_dict = {'stimulation': NFdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_seq, full_dict
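# Illustrative usage sketch for UniformContrast, assuming `mon` and `ind` are
# monitor and indicator objects prepared elsewhere (their construction is not
# shown in this file):
#
#   uc = UniformContrast(monitor=mon, indicator=ind, duration=10., color=0.)
#   seq, seq_dict = uc.generate_movie_by_index()
#
# `seq` then holds one frame per unique state (gap vs. display) and
# `uc.index_to_display` lists which of those frames to draw on every monitor
# refresh.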
class SinusoidalLuminance(Stim):
"""
Generate fullfield sinusoidal luminance fluctuation at given frequency for
given number of cycles. Center luminance is at 0.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
midgap_dur : float, optional
amount of time (in seconds) in between each cycle, defaults to `0.`
max_level : float, [-1., 1.]
maximum level of peak luminance, defaults to `1.`
min_level : float, [-1., 1.]
minimum level of peak luminance, defaults to `-1.` Should be smaller than max_level
frequency : float, Hz
frequency of the luminance fluctuation, should be less than 1/4 of
monitor refresh rate.
cycle_num : int
number of cycles to be displayed. Should be positive.
start_phase : float, [0, 2*pi]
starting phase of the cycle.
"""
def __init__(self, monitor, indicator, max_level=1., min_level=-1., frequency=1.,
cycle_num=10, start_phase=0., pregap_dur=2., postgap_dur=3.,
midgap_dur=0., background=0., coordinate='degree'):
"""
Initialize SinusoidalLuminance object
"""
super(SinusoidalLuminance, self).__init__(monitor=monitor,
indicator=indicator,
coordinate=coordinate,
background=background,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'SinusoidalLuminance'
if midgap_dur >= 0.:
self.midgap_dur = float(midgap_dur)
else:
raise ValueError('midgap_dur should be no less than 0.')
if max_level > 1.:
self.max_level = 1.
elif max_level < -1.:
self.max_level = -1
else:
self.max_level = float(max_level)
if min_level > 1.:
self.min_level = 1.
elif min_level < -1:
self.min_level = -1.
else:
self.min_level = float(min_level)
if self.min_level >= self.max_level:
raise ValueError('self.min_level ({}) should be smaller than '
'self.max_level({}).'.format(self.min_level, self.max_level))
if frequency > (monitor.refresh_rate / 4.):
raise ValueError('frequency too high to be sufficiently sampled. Should '
'be less than 1/4 of monitor refresh rate: {}.'.format(self.monitor.refresh_rate))
self.frequency = float(frequency)
if int(cycle_num) <= 0:
raise ValueError('cycle_num should be a positive integer.')
self.cycle_num = int(cycle_num)
self.start_phase = start_phase % (2 * np.pi)
self.frame_config = ('is_display',
'color [-1., 1.]',
'indicator color [-1., 1.]')
def _generate_frames_for_index_display(self):
if self.indicator.is_sync:
gap_frame = (0, None, -1.)
frames_per_cycle = int(np.round(self.monitor.refresh_rate / self.frequency))
phases = (2. * np.pi * np.arange(frames_per_cycle) / frames_per_cycle) + self.start_phase
mean_level = (self.max_level + self.min_level) / 2
amp_level = (self.max_level - self.min_level) / 2
colors = np.sin(phases) * amp_level + mean_level
indicator_on_frames = frames_per_cycle // 2
indicator_off_frames = frames_per_cycle - indicator_on_frames
indicator_color = [1.] * indicator_on_frames + [0.] * indicator_off_frames
display_frames = [(1, colors[frame_i], indicator_color[frame_i]) for frame_i
in range(frames_per_cycle)]
frames = [gap_frame] + display_frames
return frames
else:
raise NotImplementedError("method not avaialable for non-sync indicator.")
def _generate_display_index(self):
pregap_ind = [0] * int(self.pregap_dur * self.monitor.refresh_rate)
postgap_ind = [0] * int(self.postgap_dur * self.monitor.refresh_rate)
midgap_ind = [0] * int(self.midgap_dur * self.monitor.refresh_rate)
frames_per_cycle = int(np.round(self.monitor.refresh_rate / self.frequency))
cycle_ind = midgap_ind + range(1, frames_per_cycle + 1)
display_ind = cycle_ind * self.cycle_num
display_ind = display_ind[len(midgap_ind):]
index_to_display = pregap_ind + display_ind + postgap_ind
return index_to_display
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
self.frames_unique = self._generate_frames_for_index_display()
self.index_to_display = self._generate_display_index()
num_frames = len(self.frames_unique)
num_pixels_width = self.monitor.deg_coord_x.shape[0]
num_pixels_height = self.monitor.deg_coord_x.shape[1]
# Initialize numpy array of 0's as placeholder for stimulus routine
full_sequence = np.ones((num_frames,
num_pixels_width,
num_pixels_height),
dtype=np.float32) * self.background
# Compute pixel coordinates for indicator
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
for i, frame in enumerate(self.frames_unique):
if frame[1] is not None:
full_sequence[i] = frame[1]
# Insert indicator pixels
full_sequence[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[2]
monitor_dict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
NF_dict = dict(self.__dict__)
NF_dict.pop('monitor')
NF_dict.pop('indicator')
full_dict = {'stimulation': NF_dict,
'monitor': monitor_dict,
'indicator': indicator_dict}
return full_sequence, full_dict
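# Worked example for SinusoidalLuminance: with a 60 Hz monitor and
# frequency=2., frames_per_cycle = round(60 / 2.) = 30, so one luminance cycle
# is written as 30 unique frames. With max_level=0.8 and min_level=-0.2 the
# mean level is (0.8 + -0.2) / 2 = 0.3 and the amplitude is
# (0.8 - -0.2) / 2 = 0.5, i.e. colors = 0.5 * sin(phase) + 0.3. (The 60 Hz
# refresh rate and the level values are assumptions for illustration.)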
class FlashingCircle(Stim):
"""
Generate flashing circle stimulus.
Stimulus routine presents a circle centered at the position `center`
with given `radius`.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
center : 2-tuple, optional
center coordinate (altitude, azimuth) of the circle in degrees, defaults to (0.,60.).
radius : float, optional
radius of the circle, defaults to `10.`
is_smooth_edge : bool
True, smooth circle edge with smooth_width_ratio and smooth_func
False, do not smooth edge
smooth_width_ratio : float, should be smaller than 1.
the ratio between smooth band width and radius, circle edge is the middle
of smooth band
smooth_func : function object
        this function takes two inputs:
        first, an ndarray storing the distance from each pixel to the smooth band center;
        second, the smooth band width;
        it returns a smoothed mask with the same shape as the input ndarray
color : float, optional
color of the circle, takes values in [-1,1], defaults to `-1.`
iteration : int, optional
total number of flashes, defaults to `1`.
flash_frame : int, optional
number of frames that circle is displayed during each presentation
of the stimulus, defaults to `3`.
"""
def __init__(self, monitor, indicator, coordinate='degree', center=(0., 60.),
radius=10., is_smooth_edge=False, smooth_width_ratio=0.2,
smooth_func=blur_cos, color=-1., flash_frame_num=3,
pregap_dur=2., postgap_dur=3., background=0., midgap_dur=1.,
iteration=1):
"""
Initialize `FlashingCircle` stimulus object.
"""
super(FlashingCircle, self).__init__(monitor=monitor,
indicator=indicator,
background=background,
coordinate=coordinate,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'FlashingCircle'
self.center = center
self.radius = float(radius)
self.color = float(color)
self.flash_frame_num = int(flash_frame_num)
self.frame_config = ('is_display', 'indicator color [-1., 1.]')
self.is_smooth_edge = is_smooth_edge
self.smooth_width_ratio = float(smooth_width_ratio)
self.smooth_func = smooth_func
self.midgap_dur = float(midgap_dur)
self.iteration = int(iteration)
if self.pregap_frame_num + self.postgap_frame_num == 0:
raise ValueError('pregap_frame_num + postgap_frame_num should be larger than 0.')
self.clear()
def set_flash_frame_num(self, flash_frame_num):
self.flash_frame_num = flash_frame_num
self.clear()
def set_color(self, color):
self.color = color
self.clear()
def set_center(self, center):
self.center = center
self.clear()
def set_radius(self, radius):
self.radius = radius
self.clear()
@property
def midgap_frame_num(self):
return int(self.midgap_dur * self.monitor.refresh_rate)
def generate_frames(self):
"""
function to generate all the frames needed for the stimulation.
Information contained in each frame:
first element :
during a gap, the value is equal to 0 and during display the
value is equal to 1
second element :
corresponds to the color of indicator
if indicator.is_sync is True, during stimulus the value is
            equal to 1., whereas during a gap the value is equal to -1.;
if indicator.is_sync is False, indicator color will alternate
between 1. and -1. at the frequency as indicator.freq
Returns
-------
frames : list
list of information defining each frame.
"""
frames = [[0, -1.]] * self.pregap_frame_num
for iter in range(self.iteration):
if self.indicator.is_sync:
frames += [[0, -1.]] * self.midgap_frame_num
frames += [[1, 1.]] * self.flash_frame_num
else:
frames += [[0, -1.]] * self.midgap_frame_num
frames += [[1, -1.]] * self.flash_frame_num
frames += [[0, -1.]] * self.postgap_frame_num
frames = frames[self.midgap_frame_num:]
        if not self.indicator.is_sync:
            # at this point frames is a nested list (not an ndarray), so use
            # len() and nested indexing instead of array-style access
            for frame_ind in xrange(len(frames)):
                # mark unsynchronized indicator
                if np.floor(frame_ind // self.indicator.frame_num) % 2 == 0:
                    frames[frame_ind][1] = 1.
                else:
                    frames[frame_ind][1] = -1.
frames = [tuple(x) for x in frames]
return tuple(frames)
def _generate_frames_for_index_display(self):
"""
frame structure: first element: is_gap (0:False; 1:True).
second element: indicator color [-1., 1.]
"""
if self.indicator.is_sync:
gap = (0., -1.)
flash = (1., 1.)
frames = (gap, flash)
return frames
else:
            raise NotImplementedError("method not available for non-sync indicator")
def _generate_display_index(self):
""" compute a list of indices corresponding to each frame to display. """
if self.indicator.is_sync:
index_to_display = [0] * self.pregap_frame_num
for iter in range(self.iteration):
index_to_display += [0] * self.midgap_frame_num
index_to_display += [1] * self.flash_frame_num
index_to_display += [0] * self.postgap_frame_num
index_to_display = index_to_display[self.midgap_frame_num:]
return index_to_display
else:
            raise NotImplementedError("method not available for non-sync indicator")
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
# compute unique frame parameters
self.frames_unique = self._generate_frames_for_index_display()
self.index_to_display = self._generate_display_index()
num_frames = len(self.frames_unique)
num_pixels_width = self.monitor.deg_coord_x.shape[0]
num_pixels_height = self.monitor.deg_coord_x.shape[1]
full_sequence = self.background * np.ones((num_frames,
num_pixels_width,
num_pixels_height),
dtype=np.float32)
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
# background = self.background * np.ones((num_pixels_width,
# num_pixels_height),
# dtype=np.float32)
if self.coordinate == 'degree':
map_azi = self.monitor.deg_coord_x
map_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
map_azi = self.monitor.lin_coord_x
map_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
circle_mask = get_circle_mask(map_alt=map_alt, map_azi=map_azi,
center=self.center, radius=self.radius,
is_smooth_edge=self.is_smooth_edge,
blur_ratio=self.smooth_width_ratio,
blur_func=self.smooth_func).astype(np.float32)
# plt.imshow(circle_mask)
# plt.show()
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1:
                full_sequence[i][np.where(circle_mask == 1)] = self.color
full_sequence[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[1]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
NFdict = dict(self.__dict__)
NFdict.pop('monitor')
NFdict.pop('indicator')
NFdict.pop('smooth_func')
full_dict = {'stimulation': NFdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_sequence, full_dict
def generate_movie(self):
"""
generate movie frame by frame.
"""
self.frames = self.generate_frames()
full_seq = np.zeros((len(self.frames), self.monitor.deg_coord_x.shape[0],
self.monitor.deg_coord_x.shape[1]),
dtype=np.float32)
indicator_width_min = (self.indicator.center_width_pixel -
(self.indicator.width_pixel / 2))
indicator_width_max = (self.indicator.center_width_pixel +
(self.indicator.width_pixel / 2))
indicator_height_min = (self.indicator.center_height_pixel -
(self.indicator.height_pixel / 2))
indicator_height_max = (self.indicator.center_height_pixel +
(self.indicator.height_pixel / 2))
background = np.ones((np.size(self.monitor.deg_coord_x, 0),
np.size(self.monitor.deg_coord_x, 1)),
dtype=np.float32) * self.background
if self.coordinate == 'degree':
map_azi = self.monitor.deg_coord_x
map_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
map_azi = self.monitor.lin_coord_x
map_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
circle_mask = get_circle_mask(map_alt=map_alt, map_azi=map_azi,
center=self.center, radius=self.radius,
is_smooth_edge=self.is_smooth_edge,
blur_ratio=self.smooth_width_ratio,
blur_func=self.smooth_func).astype(np.float32)
for i in range(len(self.frames)):
curr_frame = self.frames[i]
if curr_frame[0] == 0:
curr_FC_seq = background
else:
curr_FC_seq = ((circle_mask * self.color) +
((-1 * (circle_mask - 1)) * background))
curr_FC_seq[indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = curr_frame[1]
full_seq[i] = curr_FC_seq
            if i in range(0, len(self.frames), len(self.frames) // 10):
print('Generating numpy sequence: '
+ str(int(100 * (i + 1) / len(self.frames))) + '%')
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
NFdict = dict(self.__dict__)
NFdict.pop('monitor')
NFdict.pop('indicator')
NFdict.pop('smooth_func')
full_dict = {'stimulation': NFdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_seq, full_dict
class SparseNoise(Stim):
"""
    Generate a sparse noise stimulus that integrates a flashing indicator for
    the photodiode.
    This stimulus routine presents quasi-random noise in a specified region of
    the monitor. The `background` color can be customized but defaults to a
    grey value. Can specify the `subregion` of the monitor where the pixels
    will flash on and off (white and black, respectively).
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
grid_space : 2-tuple of floats, optional
first coordinate is altitude, second coordinate is azimuth
probe_size : 2-tuple of floats, optional
size of flicker probes. First coordinate defines the width, and
second coordinate defines the height
probe_orientation : float, optional
orientation of flicker probes
probe_frame_num : int, optional
number of frames for each square presentation
subregion : list or tuple
the region on the monitor that will display the sparse noise,
list or tuple, [min_alt, max_alt, min_azi, max_azi]
    sign : {'ON-OFF', 'ON', 'OFF'}, optional
        determines which pixels appear in the `subregion`, defaults to
        `'ON-OFF'` so that both on and off pixels appear. If `'ON'` is
        selected only on (white) pixels are displayed in the noise
        `subregion`, while if `'OFF'` is selected only off (black) pixels
        are displayed in the noise `subregion`
iteration : int, optional
number of times to present stimulus, defaults to `1`
is_include_edge : bool, default True,
if True, the displayed probes will cover the edge case and ensure that
the entire subregion is covered.
If False, the displayed probes will exclude edge case and ensure that all
the centers of displayed probes are within the subregion.
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.), probe_orientation=0.,
probe_frame_num=6, subregion=None, sign='ON-OFF', iteration=1,
pregap_dur=2., postgap_dur=3., is_include_edge=True):
"""
Initialize sparse noise object, inherits Parameters from Stim object
"""
super(SparseNoise, self).__init__(monitor=monitor,
indicator=indicator,
background=background,
coordinate=coordinate,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'SparseNoise'
self.grid_space = grid_space
self.probe_size = probe_size
self.probe_orientation = probe_orientation
if probe_frame_num >= 2.:
self.probe_frame_num = int(probe_frame_num)
else:
raise ValueError('SparseNoise: probe_frame_num should be no less than 2.')
self.is_include_edge = is_include_edge
self.frame_config = ('is_display', 'probe center (altitude, azimuth)',
'polarity (-1 or 1)', 'indicator color [-1., 1.]')
if subregion is None:
if self.coordinate == 'degree':
self.subregion = [np.amin(self.monitor.deg_coord_y),
np.amax(self.monitor.deg_coord_y),
np.amin(self.monitor.deg_coord_x),
np.amax(self.monitor.deg_coord_x)]
if self.coordinate == 'linear':
self.subregion = [np.amin(self.monitor.lin_coord_y),
np.amax(self.monitor.lin_coord_y),
np.amin(self.monitor.lin_coord_x),
np.amax(self.monitor.lin_coord_x)]
else:
self.subregion = subregion
self.sign = sign
if iteration >= 1:
self.iteration = int(iteration)
else:
raise ValueError('iteration should be no less than 1.')
self.clear()
def _get_grid_locations(self, is_plot=False):
"""
generate all the grid points in display area (covered by both subregion and
monitor span)
Returns
-------
grid_points : n x 2 array,
refined [alt, azi] pairs of probe centers going to be displayed
"""
# get all the visual points for each pixels on monitor
if self.coordinate == 'degree':
monitor_azi = self.monitor.deg_coord_x
monitor_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
monitor_azi = self.monitor.lin_coord_x
monitor_alt = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. '
'Should be either "linear" or "degree".'.
format(self.coordinate))
grid_locations = get_grid_locations(subregion=self.subregion, grid_space=self.grid_space,
monitor_azi=monitor_azi, monitor_alt=monitor_alt,
is_include_edge=self.is_include_edge, is_plot=is_plot)
return grid_locations
def _generate_grid_points_sequence(self):
"""
        generate a pseudorandomized grid point sequence. For 'ON-OFF',
        consecutive frames should not present a stimulus at the same location
Returns
-------
all_grid_points : list
list of the form [grid_point, sign]
"""
grid_points = self._get_grid_locations()
if self.sign == 'ON':
grid_points = [[x, 1] for x in grid_points]
random.shuffle(grid_points)
return grid_points
elif self.sign == 'OFF':
grid_points = [[x, -1] for x in grid_points]
random.shuffle(grid_points)
return grid_points
elif self.sign == 'ON-OFF':
all_grid_points = [[x, 1] for x in grid_points] + [[x, -1] for x in grid_points]
random.shuffle(all_grid_points)
# remove coincident hit of same location by continuous frames
            print('removing coincident hits of same location in consecutive frames:')
while True:
iteration = 0
coincident_hit_num = 0
for i, grid_point in enumerate(all_grid_points[:-3]):
if (all_grid_points[i][0] == all_grid_points[i + 1][0]).all():
all_grid_points[i + 1], all_grid_points[i + 2] = all_grid_points[i + 2], all_grid_points[i + 1]
coincident_hit_num += 1
iteration += 1
                print('iteration: ' + str(iteration) +
                      '; consecutive hits number: ' + str(coincident_hit_num))
if coincident_hit_num == 0:
break
return all_grid_points
def generate_frames(self):
"""
function to generate all the frames needed for SparseNoise stimulus
returns a list of information of all frames as a list of tuples
Information contained in each frame:
first element - int
when stimulus is displayed value is equal to 1, otherwise
equal to 0,
        second element - tuple,
            retinotopic location of the center of the current square, [alt, azi]
        third element -
            polarity of the current square, 1 -> bright, -1 -> dark
        fourth element - color of indicator
            if synchronized : equal to -1. during gaps, 1. for the first
            probe_frame_num // 2 frames of each square presentation, and
            -1. for the rest.
            if non-synchronized: values alternate between -1. and 1.
            at the frequency defined by indicator.freq
for gap frames the second and third elements should be 'None'
"""
frames = []
if self.probe_frame_num == 1:
indicator_on_frame = 1
elif self.probe_frame_num > 1:
indicator_on_frame = self.probe_frame_num // 2
else:
raise ValueError('`probe_frame_num` should be an int larger than 0!')
indicator_off_frame = self.probe_frame_num - indicator_on_frame
frames += [[0., None, None, -1.]] * self.pregap_frame_num
for i in range(self.iteration):
iter_grid_points = self._generate_grid_points_sequence()
for grid_point in iter_grid_points:
frames += [[1., grid_point[0], grid_point[1], 1.]] * indicator_on_frame
frames += [[1., grid_point[0], grid_point[1], -1.]] * indicator_off_frame
frames += [[0., None, None, -1.]] * self.postgap_frame_num
if not self.indicator.is_sync:
indicator_frame = self.indicator.frame_num
for m in range(len(frames)):
if np.floor(m // indicator_frame) % 2 == 0:
frames[m][3] = 1.
else:
frames[m][3] = -1.
frames = [tuple(x) for x in frames]
return tuple(frames)
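    # Illustrative sketch (comments only, not executed): with a synced
    # indicator and probe_frame_num=6, a single bright probe at a
    # hypothetical location [alt=0., azi=10.] contributes six consecutive
    # frames to the output of generate_frames() above:
    #
    #   (1., [0., 10.], 1, 1.)   x 3   # indicator on for the first half
    #   (1., [0., 10.], 1, -1.)  x 3   # indicator off for the second half
    #
    # gap frames are (0., None, None, -1.).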
def _generate_frames_for_index_display(self):
""" compute the information that defines the frames used for index display"""
if self.indicator.is_sync:
frames_unique = []
gap = [0., None, None, -1.]
frames_unique.append(gap)
grid_points = self._get_grid_locations()
for grid_point in grid_points:
if self.sign == 'ON':
frames_unique.append([1., grid_point, 1., 1.])
frames_unique.append([1., grid_point, 1., -1.])
elif self.sign == 'OFF':
frames_unique.append([1., grid_point, -1., 1.])
frames_unique.append([1., grid_point, -1., -1])
elif self.sign == 'ON-OFF':
frames_unique.append([1., grid_point, 1., 1.])
frames_unique.append([1., grid_point, 1., -1.])
frames_unique.append([1., grid_point, -1., 1.])
frames_unique.append([1., grid_point, -1., -1])
else:
raise ValueError('SparseNoise: Do not understand "sign", should '
'be one of "ON", "OFF" and "ON-OFF".')
frames_unique = tuple([tuple(f) for f in frames_unique])
return frames_unique
else:
            raise NotImplementedError("method not available for non-sync indicator")
@staticmethod
def _get_probe_index_for_one_iter_on_off(frames_unique):
"""
get shuffled probe indices from frames_unique generated by
self._generate_frames_for_index_display(), only for 'ON-OFF' stimulus
the first element of frames_unique should be gap frame, the following
frames should be [
            (probe_i_ON, indicator_ON),
            (probe_i_ON, indicator_OFF),
            (probe_i_OFF, indicator_ON),
            (probe_i_OFF, indicator_OFF),
]
it is designed such that no consecutive probes will hit the same visual
field location
return list of integers, indices of shuffled probe
"""
if len(frames_unique) % 4 == 1:
probe_num = (len(frames_unique) - 1) // 2
else:
raise ValueError('number of frames_unique should be 4x + 1')
probe_locations = [f[1] for f in frames_unique[1::2]]
probe_ind = np.arange(probe_num)
np.random.shuffle(probe_ind)
is_overlap = True
while is_overlap:
is_overlap = False
for i in range(probe_num - 1):
probe_loc_0 = probe_locations[probe_ind[i]]
probe_loc_1 = probe_locations[probe_ind[i + 1]]
if np.array_equal(probe_loc_0, probe_loc_1):
# print('overlapping probes detected. ind_{}:loc{}; ind_{}:loc{}'
# .format(i, probe_loc_0, i + 1, probe_loc_1))
# print ('ind_{}:loc{}'.format((i + 2) % probe_num,
# probe_locations[(i + 2) % probe_num]))
ind_temp = probe_ind[i + 1]
probe_ind[i + 1] = probe_ind[(i + 2) % probe_num]
probe_ind[(i + 2) % probe_num] = ind_temp
is_overlap = True
return probe_ind
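    # Illustrative sketch (comments only, not executed): for an 'ON-OFF'
    # stimulus with two hypothetical grid locations A and B, frames_unique
    # from _generate_frames_for_index_display() has 9 entries:
    #
    #   index 0     : gap frame
    #   indices 1-4 : (A, ON, ind_on), (A, ON, ind_off),
    #                 (A, OFF, ind_on), (A, OFF, ind_off)
    #   indices 5-8 : the same four combinations for location B
    #
    # probe_num = (9 - 1) // 2 = 4, and probe index p maps back to
    # frames_unique indices 2p+1 (indicator on) and 2p+2 (indicator off),
    # which is how _generate_display_index() below uses the shuffled indices.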
def _generate_display_index(self):
""" compute a list of indices corresponding to each frame to display. """
frames_unique = self._generate_frames_for_index_display()
probe_on_frame_num = self.probe_frame_num // 2
probe_off_frame_num = self.probe_frame_num - probe_on_frame_num
if self.sign == 'ON' or self.sign == 'OFF':
if len(frames_unique) % 2 == 1:
                probe_num = (len(frames_unique) - 1) // 2
else:
raise ValueError('SparseNoise: number of unique frames is not correct. Should be odd.')
index_to_display = []
index_to_display += [0] * self.pregap_frame_num
for iter in range(self.iteration):
probe_sequence = np.arange(probe_num)
np.random.shuffle(probe_sequence)
for probe_ind in probe_sequence:
index_to_display += [probe_ind * 2 + 1] * probe_on_frame_num
index_to_display += [probe_ind * 2 + 2] * probe_off_frame_num
index_to_display += [0] * self.postgap_frame_num
elif self.sign == 'ON-OFF':
if len(frames_unique) % 4 != 1:
raise ValueError('number of frames_unique should be 4x + 1')
index_to_display = []
index_to_display += [0] * self.pregap_frame_num
for iter in range(self.iteration):
probe_inds = self._get_probe_index_for_one_iter_on_off(frames_unique)
for probe_ind in probe_inds:
index_to_display += [probe_ind * 2 + 1] * probe_on_frame_num
index_to_display += [probe_ind * 2 + 2] * probe_off_frame_num
index_to_display += [0] * self.postgap_frame_num
else:
raise ValueError('SparseNoise: Do not understand "sign", should '
'be one of "ON", "OFF" and "ON-OFF".')
return frames_unique, index_to_display
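    # Illustrative sketch (comments only, not executed): for sign 'ON' with
    # three probes, probe_frame_num=6 and a synced indicator, each shuffled
    # probe p contributes [2p+1]*3 + [2p+2]*3 to index_to_display, e.g. a
    # shuffled order (1, 0, 2) yields (ignoring pre/post gap zeros):
    #
    #   [3, 3, 3, 4, 4, 4,  1, 1, 1, 2, 2, 2,  5, 5, 5, 6, 6, 6]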
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
self.frames_unique, self.index_to_display = self._generate_display_index()
num_unique_frames = len(self.frames_unique)
num_pixels_width = self.monitor.deg_coord_x.shape[0]
num_pixels_height = self.monitor.deg_coord_x.shape[1]
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. '
'Should be either "linear" or "degree".'.
format(self.coordinate))
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
full_seq = self.background * \
np.ones((num_unique_frames, num_pixels_width, num_pixels_height), dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1:
curr_probes = ([frame[1][0], frame[1][1], frame[2]],)
# print type(curr_probes)
disp_mat = get_warped_probes(deg_coord_alt=coord_alt,
deg_coord_azi=coord_azi,
probes=curr_probes,
width=self.probe_size[0],
height=self.probe_size[1],
ori=self.probe_orientation,
background_color=self.background)
full_seq[i] = disp_mat
full_seq[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[3]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
SNdict = dict(self.__dict__)
SNdict.pop('monitor')
SNdict.pop('indicator')
full_dict = {'stimulation': SNdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_seq, full_dict
def generate_movie(self):
"""
generate movie for display frame by frame
"""
self.frames = self.generate_frames()
if self.coordinate == 'degree':
coord_x = self.monitor.deg_coord_x
coord_y = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_x = self.monitor.lin_coord_x
coord_y = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. '
'Should be either "linear" or "degree".'.
format(self.coordinate))
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
full_seq = np.ones((len(self.frames),
self.monitor.deg_coord_x.shape[0],
self.monitor.deg_coord_x.shape[1]),
dtype=np.float32) * self.background
for i, curr_frame in enumerate(self.frames):
if curr_frame[0] == 1: # not a gap
curr_probes = ([curr_frame[1][0], curr_frame[1][1], curr_frame[2]],)
if i == 0: # first frame and (not a gap)
curr_disp_mat = get_warped_probes(deg_coord_alt=coord_y,
deg_coord_azi=coord_x,
probes=curr_probes,
width=self.probe_size[0],
height=self.probe_size[1],
ori=self.probe_orientation,
background_color=self.background)
else: # (not first frame) and (not a gap)
if self.frames[i - 1][1] is None: # (not first frame) and (not a gap) and (new square from gap)
curr_disp_mat = get_warped_probes(deg_coord_alt=coord_y,
deg_coord_azi=coord_x,
probes=curr_probes,
width=self.probe_size[0],
height=self.probe_size[1],
ori=self.probe_orientation,
background_color=self.background)
elif (curr_frame[1] != self.frames[i - 1][1]).any() or (curr_frame[2] != self.frames[i - 1][2]):
# (not first frame) and (not a gap) and (new square from old square)
curr_disp_mat = get_warped_probes(deg_coord_alt=coord_y,
deg_coord_azi=coord_x,
probes=curr_probes,
width=self.probe_size[0],
height=self.probe_size[1],
ori=self.probe_orientation,
background_color=self.background)
# assign current display matrix to full sequence
full_seq[i] = curr_disp_mat
# add sync square for photodiode
full_seq[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = curr_frame[3]
            if i in range(0, len(self.frames), len(self.frames) // 10):
print('Generating numpy sequence: ' +
str(int(100 * (i + 1) / len(self.frames))) + '%')
# generate log dictionary
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
SNdict = dict(self.__dict__)
SNdict.pop('monitor')
SNdict.pop('indicator')
full_dict = {'stimulation': SNdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_seq, full_dict
class LocallySparseNoise(Stim):
"""
    Generate a locally sparse noise stimulus that integrates a flashing
    indicator for the photodiode.
    This stimulus routine presents quasi-random noise in a specified region of
    the monitor. The `background` color can be customized but defaults to a
    grey value. Can specify the `subregion` of the monitor where the pixels
    will flash on and off (white and black, respectively).
Different from SparseNoise stimulus which presents only one probe at a time,
the LocallySparseNoise presents multiple probes simultaneously to speed up
the sampling frequency. The sparsity of probes is defined by minimum distance
in visual degree: in any given frame, the centers of any pair of two probes
will have distance larger than minimum distance in visual degrees. The
    method generating locally sparse noise here ensures that, within each
    iteration, every location in the subregion is sampled once and only once.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
min_distance : float, default 20.
the minimum distance in visual degree for any pair of probe centers
in a given frame
grid_space : 2-tuple of floats, optional
first coordinate is altitude, second coordinate is azimuth
probe_size : 2-tuple of floats, optional
size of flicker probes. First coordinate defines the width, and
second coordinate defines the height
probe_orientation : float, optional
orientation of flicker probes
probe_frame_num : int, optional
number of frames for each square presentation
subregion : list or tuple
the region on the monitor that will display the sparse noise,
list or tuple, [min_alt, max_alt, min_azi, max_azi]
    sign : {'ON-OFF', 'ON', 'OFF'}, optional
        determines which pixels appear in the `subregion`, defaults to
        `'ON-OFF'` so that both on and off pixels appear. If `'ON'` is
        selected only on (white) pixels are displayed in the noise
        `subregion`, while if `'OFF'` is selected only off (black) pixels
        are displayed in the noise `subregion`
    iteration : int, optional
        number of times to present the stimulus in random order; the total
        number of times a particular probe will be displayed is
        iteration * repeat, defaults to `1`
    repeat : int, optional
        number of repeats of the whole sequence; the total number of times a
        particular probe will be displayed is iteration * repeat, defaults to `1`
is_include_edge : bool, default True,
if True, the displayed probes will cover the edge case and ensure that
the entire subregion is covered.
If False, the displayed probes will exclude edge case and ensure that all
the centers of displayed probes are within the subregion.
"""
def __init__(self, monitor, indicator, min_distance=20., background=0., coordinate='degree',
grid_space=(10., 10.), probe_size=(10., 10.), probe_orientation=0.,
probe_frame_num=6, subregion=None, sign='ON-OFF', iteration=1, repeat=1,
pregap_dur=2., postgap_dur=3., is_include_edge=True):
"""
Initialize sparse noise object, inherits Parameters from Stim object
"""
super(LocallySparseNoise, self).__init__(monitor=monitor, indicator=indicator,
background=background, coordinate=coordinate,
pregap_dur=pregap_dur, postgap_dur=postgap_dur)
self.stim_name = 'LocallySparseNoise'
self.grid_space = grid_space
self.probe_size = probe_size
self.min_distance = float(min_distance)
self.probe_orientation = probe_orientation
self.is_include_edge = is_include_edge
self.frame_config = ('is_display', 'probes ((altitude, azimuth, sign), ...)',
'iteration', 'indicator color [-1., 1.]')
if probe_frame_num >= 2:
self.probe_frame_num = int(probe_frame_num)
else:
            raise ValueError('LocallySparseNoise: probe_frame_num should be no less than 2.')
self.is_include_edge = is_include_edge
if subregion is None:
if self.coordinate == 'degree':
self.subregion = [np.amin(self.monitor.deg_coord_y),
np.amax(self.monitor.deg_coord_y),
np.amin(self.monitor.deg_coord_x),
np.amax(self.monitor.deg_coord_x)]
if self.coordinate == 'linear':
self.subregion = [np.amin(self.monitor.lin_coord_y),
np.amax(self.monitor.lin_coord_y),
np.amin(self.monitor.lin_coord_x),
np.amax(self.monitor.lin_coord_x)]
else:
self.subregion = subregion
self.sign = sign
if iteration >= 1:
self.iteration = int(iteration)
else:
raise ValueError('iteration should be no less than 1.')
if repeat >= 1:
self.repeat = int(repeat)
else:
raise ValueError('repeat should be no less than 1.')
self.clear()
def _get_grid_locations(self, is_plot=False):
"""
generate all the grid points in display area (covered by both subregion and
monitor span)
Returns
-------
grid_points : n x 2 array,
            refined [alt, azi] pairs of probe centers going to be displayed
"""
# get all the visual points for each pixels on monitor
if self.coordinate == 'degree':
monitor_azi = self.monitor.deg_coord_x
monitor_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
monitor_azi = self.monitor.lin_coord_x
monitor_alt = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. Should be either "linear" or "degree".'.
format(self.coordinate))
grid_locations = get_grid_locations(subregion=self.subregion, grid_space=self.grid_space,
monitor_azi=monitor_azi, monitor_alt=monitor_alt,
is_include_edge=self.is_include_edge, is_plot=is_plot)
return grid_locations
def _generate_all_probes(self):
"""
return all possible (grid location + sign) combinations within the subregion,
return a list of probe parameters, each element in the list is
[center_altitude, center_azimuth, sign]
"""
grid_locs = self._get_grid_locations()
grid_locs = list([list(gl) for gl in grid_locs])
if self.sign == 'ON':
all_probes = [gl + [1.] for gl in grid_locs]
elif self.sign == 'OFF':
all_probes = [gl + [-1.] for gl in grid_locs]
elif self.sign == 'ON-OFF':
all_probes = [gl + [1.] for gl in grid_locs] + [gl + [-1.] for gl in grid_locs]
else:
raise ValueError('LocallySparseNoise: Cannot understand self.sign, should be '
'one of "ON", "OFF", "ON-OFF".')
return all_probes
def _generate_probe_locs_one_frame(self, probes):
"""
        given the available probes, generate a sublist of probes for a single
        frame; all probes in the sublist will have pairwise visual-space
        distances larger than self.min_distance. The input list is shuffled
        in place; probes that were not selected are returned separately.
        parameters
        ----------
        probes : list of all available probes
            each element is [center_altitude, center_azimuth, sign] for a
            particular probe. Probes whose centers lie within
            self.min_distance of an already selected probe are rejected.
        returns
        -------
        probes_one_frame : list of selected probes for one frame
            each element is [center_altitude, center_azimuth, sign] for a
            selected probe
        probes_left : list of the probes not selected for this frame
"""
np.random.shuffle(probes)
probes_one_frame = []
probes_left = list(probes)
for probe in probes:
# print len(probes)
is_overlap = False
for probe_frame in probes_one_frame:
# print probe
# print probe_frame
curr_dis = ia.distance([probe[0], probe[1]], [probe_frame[0], probe_frame[1]])
if curr_dis <= self.min_distance:
is_overlap = True
break
if not is_overlap:
probes_one_frame.append(probe)
probes_left.remove(probe)
return probes_one_frame, probes_left
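    # Illustrative sketch (comments only, not executed): with a hypothetical
    # min_distance of 20. and shuffled probes
    #   [0., 0., 1.], [0., 10., 1.], [0., 30., -1.]
    # the greedy selection above keeps [0., 0., 1.], rejects [0., 10., 1.]
    # (its center is only 10 degrees away), keeps [0., 30., -1.], and returns
    #   probes_one_frame = [[0., 0., 1.], [0., 30., -1.]]
    #   probes_left      = [[0., 10., 1.]]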
def _generate_probe_sequence_one_iteration(self, all_probes, is_redistribute=True):
"""
given all probes to be displayed and minimum distance between any pair of two probes
return frames of one iteration that ensure all probes will be present once
parameters
----------
all_probes : list
all probes to be displayed, each element (center_alt, center_azi, sign). ideally
outputs of self._generate_all_probes()
is_redistribute : bool
redistribute the probes among frames after initial generation or not.
redistribute will use self._redistribute_probes() and try to minimize the difference
of probe numbers among different frames
returns
-------
frames : tuple
each element of the frames tuple represent one display frame, the element itself
is a tuple of the probes to be displayed in this particular frame
"""
all_probes_cpy = list(all_probes)
frames = []
while len(all_probes_cpy) > 0:
curr_frames, all_probes_cpy = self._generate_probe_locs_one_frame(probes=all_probes_cpy)
frames.append(curr_frames)
if is_redistribute:
frames = self._redistribute_probes(frames=frames)
frames = tuple(tuple(f) for f in frames)
return frames
def _redistribute_one_probe(self, frames):
# initiate is_moved variable
is_moved = False
# reorder frames from most probes to least probes
new_frames = sorted(frames, key=lambda frame: len(frame))
probe_num_most = len(new_frames[-1])
# the indices of frames in new_frames that contain most probes
frame_ind_most = []
# the indices of frames in new_frames that contain less probes
frame_ind_less = []
for frame_ind, frame in enumerate(new_frames):
if len(frame) == probe_num_most:
frame_ind_most.append(frame_ind)
            elif len(frame) <= probe_num_most - 2:  # a difference of 1 probe is considered well distributed
frame_ind_less.append(frame_ind)
# constructing a list of probes that potentially can be moved
# each element is [(center_alt, center_azi, sign), frame_ind]
probes_to_be_moved = []
for frame_ind in frame_ind_most:
frame_most = new_frames[frame_ind]
for probe in frame_most:
probes_to_be_moved.append((probe, frame_ind))
# loop through probes_to_be_moved to see if any of them will fit into
# frames with less probes, once find a case, break the loop and return
for probe, frame_ind_src in probes_to_be_moved:
frame_src = new_frames[frame_ind_src]
for frame_ind_dst in frame_ind_less:
frame_dst = new_frames[frame_ind_dst]
if self._is_fit(probe, frame_dst):
frame_src.remove(probe)
frame_dst.append(probe)
is_moved = True
break
if is_moved:
break
return is_moved, new_frames
def _is_fit(self, probe, probes):
"""
test if a given probe will fit a group of probes without breaking the
        sparsity
parameters
----------
probe : list or tuple of three floats
(center_alt, center_azi, sign)
probes : list of probes
            [(center_alt, center_azi, sign), (center_alt, center_azi, sign), ...]
returns
-------
is_fit : bool
the probe will fit or not
"""
is_fit = True
for probe2 in probes:
if ia.distance([probe[0], probe[1]], [probe2[0], probe2[1]]) <= self.min_distance:
is_fit = False
break
return is_fit
def _redistribute_probes(self, frames):
"""
attempt to redistribute probes among frames for one iteration of display
the algorithm is to pick a probe from the frames with most probes to the
frames with least probes and do it iteratively until it can not move
anymore and the biggest difference of probe numbers among all frames is
no more than 1 (most evenly distributed).
        each iterative step is implemented by self._redistribute_one_probe();
        this only roughly balances the probes among frames and does not
        attempt to find an optimal solution.
parameters
----------
frames : list
each element of the frames list represent one display frame, the element
itself is a list of the probes (center_alt, center_azi, sign) to be
displayed in this particular frame
returns
-------
new_frames : list
same structure as input frames but with redistributed probes
"""
new_frames = list(frames)
is_moved = True
probe_nums = [len(frame) for frame in new_frames]
probe_nums.sort()
probe_diff = probe_nums[-1] - probe_nums[0]
while is_moved and probe_diff > 1:
is_moved, new_frames = self._redistribute_one_probe(new_frames)
probe_nums = [len(frame) for frame in new_frames]
probe_nums.sort()
probe_diff = probe_nums[-1] - probe_nums[0]
        else:
            # the loop ended either because no probe could be moved or because
            # the probes are already well distributed (difference <= 1)
            pass
return new_frames
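    # Illustrative sketch (comments only, not executed): if one iteration
    # initially produces frames with, say, 5, 5 and 2 probes, the loop above
    # repeatedly asks _redistribute_one_probe() to move a probe from a
    # 5-probe frame into the 2-probe frame whenever _is_fit() says the move
    # keeps the minimum-distance constraint, stopping once the largest and
    # smallest frames differ by at most one probe or no legal move remains.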
def _generate_frames_for_index_display(self):
"""
        compute the information that defines the frames used for index display;
        the probes are generated internally by self._generate_all_probes()
        returns
        -------
        frames_unique : tuple
            the first element is the gap frame; each following element is
            (is_display, probes, iteration index, indicator color)
"""
all_probes = self._generate_all_probes()
frames_unique = []
gap = [0., None, None, -1.]
frames_unique.append(gap)
for i in range(self.iteration):
probes_iter = self._generate_probe_sequence_one_iteration(all_probes=all_probes,
is_redistribute=True)
for probes in probes_iter:
frames_unique.append([1., probes, i, 1.])
frames_unique.append([1., probes, i, -1.])
frames_unique = tuple([tuple(f) for f in frames_unique])
return frames_unique
def _generate_display_index(self):
"""
compute a list of indices corresponding to each frame to display.
"""
if self.indicator.is_sync:
frames_unique = self._generate_frames_for_index_display()
if len(frames_unique) % 2 == 1:
                display_num = (len(frames_unique) - 1) // 2  # number of unique display conditions
else:
raise ValueError('LocallySparseNoise: number of unique frames is not correct. Should be odd.')
probe_on_frame_num = self.probe_frame_num // 2
probe_off_frame_num = self.probe_frame_num - probe_on_frame_num
index_to_display = []
for display_ind in range(display_num):
index_to_display += [display_ind * 2 + 1] * probe_on_frame_num
index_to_display += [display_ind * 2 + 2] * probe_off_frame_num
index_to_display = index_to_display * self.repeat
index_to_display = [0] * self.pregap_frame_num + index_to_display + [0] * self.postgap_frame_num
return frames_unique, index_to_display
else:
            raise NotImplementedError("method not available for non-sync indicator")
def generate_movie_by_index(self):
self.frames_unique, self.index_to_display = self._generate_display_index()
num_unique_frames = len(self.frames_unique)
num_pixels_width = self.monitor.deg_coord_x.shape[0]
num_pixels_height = self.monitor.deg_coord_x.shape[1]
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. '
'Should be either "linear" or "degree".'.
format(self.coordinate))
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
full_seq = self.background * \
np.ones((num_unique_frames, num_pixels_width, num_pixels_height), dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1.:
disp_mat = get_warped_probes(deg_coord_alt=coord_alt,
deg_coord_azi=coord_azi,
probes=frame[1],
width=self.probe_size[0],
height=self.probe_size[1],
ori=self.probe_orientation,
background_color=self.background)
full_seq[i] = disp_mat
full_seq[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[3]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
SNdict = dict(self.__dict__)
SNdict.pop('monitor')
SNdict.pop('indicator')
full_dict = {'stimulation': SNdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_seq, full_dict
class DriftingGratingCircle(Stim):
"""
Generate drifting grating circle stimulus
Stimulus routine presents drifting grating stimulus inside
of a circle centered at `center`. The drifting gratings are determined by
spatial and temporal frequencies, directionality, contrast, and radius.
The routine can generate several different gratings within
one presentation by specifying multiple values of the parameters which
characterize the stimulus.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
center : 2-tuple of floats, optional
        coordinates for center of the stimulus (altitude, azimuth)
sf_list : n-tuple, optional
        list of spatial frequencies in cycles/unit, defaults to `(0.08,)`
    tf_list : n-tuple, optional
        list of temporal frequencies in Hz, defaults to `(4.,)`
    dire_list : n-tuple, optional
        list of directions in degrees, defaults to `(0.,)`
    con_list : n-tuple, optional
        list of contrasts taking values in [0., 1.], defaults to `(0.5,)`
    radius_list : n-tuple
        list of radii of circles, unit defined by `self.coordinate`, defaults
        to `(10.,)`
block_dur : float, optional
duration of each condition in seconds, defaults to `2.`
midgap_dur : float, optional
duration of gap between conditions, defaults to `0.5`
iteration : int, optional
number of times the stimulus is displayed, defaults to `1`
is_smooth_edge : bool
True, smooth circle edge with smooth_width_ratio and smooth_func
False, do not smooth edge
smooth_width_ratio : float, should be smaller than 1.
the ratio between smooth band width and radius, circle edge is the middle
of smooth band
smooth_func : function object
this function take two inputs: 1) ndarray storing the distance from each
pixel to smooth band center; 2) smooth band width.
returns smoothed mask with same shape as input ndarray
is_blank_block : bool
if True, one blank block (full screen background with the same duration of other blocks)
will be displayed for each iteration. The frames of this condition will be:
(1, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0), the meaning of these numbers can be found in
self.frame_config
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
center=(0., 60.), sf_list=(0.08,), tf_list=(4.,), dire_list=(0.,),
con_list=(0.5,), radius_list=(10.,), block_dur=2., midgap_dur=0.5,
iteration=1, pregap_dur=2., postgap_dur=3., is_smooth_edge=False,
smooth_width_ratio=0.2, smooth_func=blur_cos, is_blank_block=True):
"""
Initialize `DriftingGratingCircle` stimulus object, inherits Parameters
from `Stim` class
"""
super(DriftingGratingCircle, self).__init__(monitor=monitor,
indicator=indicator,
background=background,
coordinate=coordinate,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'DriftingGratingCircle'
if len(center) != 2:
raise ValueError("DriftingGragingCircle: input 'center' should have "
"two elements: (altitude, azimuth).")
self.center = center
self.sf_list = list(set(sf_list))
self.tf_list = list(set(tf_list))
self.dire_list = list(set(dire_list))
self.con_list = list(set(con_list))
self.radius_list = list(set(radius_list))
self.is_smooth_edge = is_smooth_edge
self.smooth_width_ratio = smooth_width_ratio
self.smooth_func = smooth_func
if int(block_dur * self.monitor.refresh_rate) >= 4:
self.block_dur = float(block_dur)
else:
            raise ValueError('There should be at least 4 frames per block, otherwise the '
                             'synchronized indicator strategy will not work.')
if midgap_dur >= 0.:
self.midgap_dur = float(midgap_dur)
else:
raise ValueError('midgap_dur should be no less than 0 second')
self.iteration = iteration
self.frame_config = ('is_display', 'isCycleStart', 'spatial frequency (cycle/deg)',
'temporal frequency (Hz)', 'direction (deg)',
'contrast [0., 1.]', 'radius (deg)', 'phase (deg)',
'indicator color [-1., 1.]')
self.is_blank_block = bool(is_blank_block)
for tf in tf_list:
period = 1. / tf
if (0.05 * period) < (block_dur % period) < (0.95 * period):
# print(period)
# print(block_dur % period)
# print(0.95 * period)
error_msg = ('Duration of each block times tf ' + str(tf)
+ ' should be close to a whole number!')
                raise ValueError(error_msg)
@property
def midgap_frame_num(self):
return int(self.midgap_dur * self.monitor.refresh_rate)
@property
def block_frame_num(self):
return int(self.block_dur * self.monitor.refresh_rate)
def _generate_all_conditions(self):
"""
generate all possible conditions for one iteration given the lists of
parameters
Returns
-------
all_conditions : list of tuples
all unique combinations of spatial frequency, temporal frequency,
direction, contrast, and radius. Output depends on initialization
parameters.
"""
all_conditions = [(sf, tf, dire, con, size) for sf in self.sf_list
for tf in self.tf_list
for dire in self.dire_list
for con in self.con_list
for size in self.radius_list]
if self.is_blank_block:
all_conditions.append((0., 0., 0., 0., 0.))
return all_conditions
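    # Illustrative note: the number of grating conditions per iteration is
    # len(sf_list) * len(tf_list) * len(dire_list) * len(con_list) *
    # len(radius_list), plus one all-zero blank condition when
    # is_blank_block is True (with the default single-element lists this
    # gives 1 + 1 = 2 conditions).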
def _generate_phase_list(self, tf):
"""
get a list of phases that will be displayed for each frame in the block
duration, also make the first frame of each cycle
Parameters
----------
tf : float
temporal frequency in Hz
Returns
-------
phases :
list of phases in one block
frame_per_cycle :
            number of frames for each cycle
"""
if tf == 0.:
phases = [0.] * self.block_frame_num
frame_per_cycle = self.block_frame_num
else:
frame_per_cycle = int(self.monitor.refresh_rate / tf)
phases_per_cycle = list(np.arange(0, np.pi * 2, np.pi * 2 / frame_per_cycle))
phases = []
while len(phases) < self.block_frame_num:
phases += phases_per_cycle
phases = phases[0: self.block_frame_num]
return phases, frame_per_cycle
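    # Illustrative sketch (comments only, not executed): on a hypothetical
    # 60 Hz monitor with block_dur=2., _generate_phase_list(4.) gives
    # frame_per_cycle = int(60 / 4) = 15, phases_per_cycle =
    # [0, 2*pi/15, ..., 28*pi/15], and the returned phase list repeats that
    # cycle until it is truncated to block_frame_num = 120 frames
    # (exactly 8 full cycles).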
@staticmethod
def _get_ori(dire):
"""
        get orientation from direction, in degrees [0., 360.)
"""
return (dire + 90.) % 360.
def generate_frames(self):
"""
function to generate all the frames needed for DriftingGratingCircle
returns a list of information of all frames as a list of tuples
Information contained in each frame:
first element -
value equal to 1 during stimulus and 0 otherwise
second element -
on first frame in a cycle value takes on 1, and otherwise is
equal to 0.
third element -
spatial frequency
        fourth element -
            temporal frequency
        fifth element -
            direction, in degrees
        sixth element -
            contrast, [0., 1.]
seventh element -
size, float (radius of the circle in visual degree)
eighth element -
phase, [0, 2*pi)
ninth element -
indicator color [-1, 1]. Value is equal to 1 on the first
frame of each cycle, -1 during gaps and otherwise 0.
during gap frames the second through the eighth elements should
be 'None'.
"""
frames = []
off_params = [0, None, None, None, None, None, None, None, -1.]
# midgap_frames = int(self.midgap_dur*self.monitor.refresh_rate)
for i in range(self.iteration):
if i == 0: # very first block
frames += [off_params for ind in range(self.pregap_frame_num)]
else: # first block for the later iteration
frames += [off_params for ind in range(self.midgap_frame_num)]
all_conditions = self._generate_all_conditions()
random.shuffle(all_conditions)
for j, condition in enumerate(all_conditions):
if j != 0: # later conditions
frames += [off_params for ind in range(self.midgap_frame_num)]
sf, tf, dire, con, size = condition
# get phase list for each condition
phases, frame_per_cycle = self._generate_phase_list(tf)
# if (dire % 360.) >= 90. and (dire % 360. < 270.):
# phases = [-phase for phase in phases]
for k, phase in enumerate(phases): # each frame in the block
# mark first frame of each cycle
if k % frame_per_cycle == 0:
first_in_cycle = 1
else:
first_in_cycle = 0
frames.append([1, first_in_cycle, sf, tf, dire,
con, size, phase, float(first_in_cycle)])
# add post gap frame
frames += [off_params for ind in range(self.postgap_frame_num)]
# add non-synchronized indicator
        if not self.indicator.is_sync:
for l in range(len(frames)):
if np.floor(l // self.indicator.frame_num) % 2 == 0:
frames[l][-1] = 1
else:
frames[l][-1] = -1
# switch each frame to tuple
frames = [tuple(frame) for frame in frames]
return tuple(frames)
def _generate_frames_for_index_display_condition(self, condi_params):
"""
:param condi_params: list of input condition parameters, [sf, tf, dire, con, size]
designed for the output of self._generate_all_conditions()
:return: frames_unique_condi: list of unique frame parameters for this particular condition
index_to_display_condi: list of indices of display order of the unique frames for
this particular condition
"""
phases, frame_per_cycle = self._generate_phase_list(condi_params[1])
if condi_params[0] == 0.: # blank block
frames_unique_condi = ((1, 1, 0., 0., 0., 0., 0., 0., 1.),
(1, 1, 0., 0., 0., 0., 0., 0., 0.))
index_to_display_condi = [1] * self.block_frame_num
index_to_display_condi[0] = 0
else:
phases_unique = phases[0:frame_per_cycle]
frames_unique_condi = []
for i, ph in enumerate(phases_unique):
if i == 0:
frames_unique_condi.append([1, 1, condi_params[0], condi_params[1], condi_params[2],
condi_params[3], condi_params[4], ph, 1.])
else:
frames_unique_condi.append([1, 0, condi_params[0], condi_params[1], condi_params[2],
condi_params[3], condi_params[4], ph, 0.])
index_to_display_condi = []
while len(index_to_display_condi) < len(phases):
index_to_display_condi += range(frame_per_cycle)
index_to_display_condi = index_to_display_condi[0:len(phases)]
frames_unique_condi = tuple([tuple(f) for f in frames_unique_condi])
return frames_unique_condi, index_to_display_condi
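    # Illustrative sketch (comments only, not executed): for a non-blank
    # condition with tf=4. on a hypothetical 60 Hz monitor, the method above
    # returns 15 unique frames (one per phase in a cycle, the first marked as
    # cycle start with indicator color 1., the rest 0.) together with an
    # index list [0, 1, ..., 14, 0, 1, ...] truncated to block_frame_num
    # entries; for the blank condition it returns just two frames and an
    # index list of block_frame_num ones with a leading zero.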
def _generate_frames_unique_and_condi_ind_dict(self):
"""
compute the information that defines the frames used for index display
:return frames_unique
the condi_ind_in_frames_unique:
{
condi_key (same condi_key as condi_dict):
list of non-negative integers representing the indices of this
particular condition in frames_unique
}
"""
if self.indicator.is_sync:
all_conditions = self._generate_all_conditions()
'''
            condi_dict is a dictionary constructed as follows
{
condi_key (i.e. condi_0000):
{
                    'frames_unique': list of unique frame parameters for this particular condition
[is_display, is_first_in_cycle, sf, tf, dire,
con, size, phase, indicator_color],
'index_to_display': list of non-negative integers,
}
}
'''
condi_dict = {}
for i, condi in enumerate(all_conditions):
frames_unique_condi, index_to_display_condi = self._generate_frames_for_index_display_condition(condi)
condi_dict.update({'condi_{:04d}'.format(i):
{'frames_unique': frames_unique_condi,
'index_to_display': index_to_display_condi}
})
            condi_keys = sorted(condi_dict.keys())
# handle frames_unique
frames_unique = []
gap_frame = (0., None, None, None, None, None, None, None, -1.)
frames_unique.append(gap_frame)
condi_ind_in_frames_unique = {}
for condi_key in condi_keys:
curr_frames_unique_total = len(frames_unique)
curr_index_to_display_condi = np.array(condi_dict[condi_key]['index_to_display'])
frames_unique += list(condi_dict[condi_key]['frames_unique'])
condi_ind_in_frames_unique.update(
{condi_key: list(curr_index_to_display_condi + curr_frames_unique_total)})
return frames_unique, condi_ind_in_frames_unique
else:
            raise NotImplementedError("method not available for non-sync indicator")
def _generate_display_index(self):
""" compute a list of indices corresponding to each frame to display. """
frames_unique, condi_ind_in_frames_unique = self._generate_frames_unique_and_condi_ind_dict()
condi_keys = list(condi_ind_in_frames_unique.keys())
index_to_display = []
index_to_display += [0] * self.pregap_frame_num
for iter in range(self.iteration):
np.random.shuffle(condi_keys)
for condi_ind, condi in enumerate(condi_keys):
if iter == 0 and condi_ind == 0:
pass
else:
index_to_display += [0] * self.midgap_frame_num
index_to_display += condi_ind_in_frames_unique[condi]
index_to_display += [0] * self.postgap_frame_num
return frames_unique, index_to_display
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
self.frames_unique, self.index_to_display = self._generate_display_index()
# print '\n'.join([str(f) for f in self.frames_unique])
mask_dict = self._generate_circle_mask_dict()
num_unique_frames = len(self.frames_unique)
num_pixels_width = self.monitor.deg_coord_x.shape[0]
num_pixels_height = self.monitor.deg_coord_x.shape[1]
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
mov = self.background * np.ones((num_unique_frames,
num_pixels_width,
num_pixels_height),
dtype=np.float32)
background_frame = self.background * np.ones((num_pixels_width,
num_pixels_height),
dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1 and frame[2] != 0.: # not a gap and not a blank block
# curr_ori = self._get_ori(frame[3])
curr_grating = get_grating(alt_map=coord_alt,
azi_map=coord_azi,
dire=frame[4],
spatial_freq=frame[2],
center=self.center,
phase=frame[7],
contrast=frame[5])
curr_grating = curr_grating * 2. - 1.
curr_circle_mask = mask_dict[frame[6]]
mov[i] = ((curr_grating * curr_circle_mask) +
(background_frame * (curr_circle_mask * -1. + 1.)))
# add sync square for photodiode
mov[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[-1]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
self_dict = dict(self.__dict__)
self_dict.pop('monitor')
self_dict.pop('indicator')
self_dict.pop('smooth_func')
log = {'stimulation': self_dict,
'monitor': mondict,
'indicator': indicator_dict}
return mov, log
def _generate_circle_mask_dict(self):
"""
generate a dictionary of circle masks for each size in size list
"""
masks = {}
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. '
'Should be either "linear" or "degree".'.
format(self.coordinate))
for radius in self.radius_list:
curr_mask = get_circle_mask(map_alt=coord_alt, map_azi=coord_azi,
center=self.center, radius=radius,
is_smooth_edge=self.is_smooth_edge,
blur_ratio=self.smooth_width_ratio,
blur_func=self.smooth_func)
masks.update({radius: curr_mask})
return masks
def generate_movie(self):
"""
Generate movie frame by frame
"""
self.frames = self.generate_frames()
mask_dict = self._generate_circle_mask_dict()
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
mov = np.ones((len(self.frames),
coord_azi.shape[0],
coord_azi.shape[1]), dtype=np.float32) * self.background
background_frame = np.ones(coord_azi.shape, dtype=np.float32) * self.background
for i, curr_frame in enumerate(self.frames):
            if curr_frame[0] == 1 and curr_frame[2] != 0.:  # not a gap and not a blank block
# curr_ori = self._get_ori(curr_frame[4])
curr_grating = get_grating(alt_map=coord_alt,
azi_map=coord_azi,
dire=curr_frame[4],
spatial_freq=curr_frame[2],
center=self.center,
phase=curr_frame[7],
contrast=curr_frame[5])
# plt.imshow(curr_grating)
# plt.show()
curr_grating = curr_grating * 2. - 1. # change scale from [0., 1.] to [-1., 1.]
curr_circle_mask = mask_dict[curr_frame[6]]
mov[i] = ((curr_grating * curr_circle_mask) +
(background_frame * (curr_circle_mask * -1. + 1.)))
# add sync square for photodiode
mov[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = curr_frame[-1]
            if i in range(0, len(self.frames), len(self.frames) // 10):
print('Generating numpy sequence: ' +
str(int(100 * (i + 1) / len(self.frames))) + '%')
# generate log dictionary
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
self_dict = dict(self.__dict__)
self_dict.pop('monitor')
self_dict.pop('indicator')
self_dict.pop('smooth_func')
log = {'stimulation': self_dict,
'monitor': mondict,
'indicator': indicator_dict}
return mov, log
class StaticGratingCircle(Stim):
"""
Generate static grating circle stimulus
Stimulus routine presents flashing static grating stimulus inside
of a circle centered at `center`. The static gratings are determined by
spatial frequencies, orientation, contrast, radius and phase. The
routine can generate several different gratings within
one presentation by specifying multiple values of the parameters which
characterize the stimulus.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
center : 2-tuple of floats, optional
coordintes for center of the stimulus (altitude, azimuth)
sf_list : n-tuple, optional
        list of spatial frequencies in cycles/unit, defaults to `(0.08,)`
    ori_list : n-tuple, optional
        list of orientations in degrees, defaults to `(0., 90.)`
    con_list : n-tuple, optional
        list of contrasts taking values in [0., 1.], defaults to `(0.5,)`
    radius_list : n-tuple, optional
        list of radii of circles, unit defined by `self.coordinate`, defaults
        to `(10.,)`
phase_list : n-tuple, optional
list of phase of gratings in degrees, default (0., 90., 180., 270.)
display_dur : float, optional
duration of each condition in seconds, defaults to `0.25`
    midgap_dur : float, optional
        duration of gap between conditions, defaults to `0.`
    iteration : int, optional
number of times the stimulus is displayed, defaults to `1`
is_smooth_edge : bool
True, smooth circle edge with smooth_width_ratio and smooth_func
False, do not smooth edge
smooth_width_ratio : float, should be smaller than 1.
the ratio between smooth band width and radius, circle edge is the middle
of smooth band
smooth_func : function object
this function take two inputs: 1) ndarray storing the distance from each
pixel to smooth band center; 2) smooth band width.
returns smoothed mask with same shape as input ndarray
is_blank_block : bool, optional
if True, a full screen background will be displayed as an additional grating.
The frames of this condition will be: (1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 or 0.0),
the meaning of these numbers can be found in self.frame_config
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
center=(0., 60.), sf_list=(0.08,), ori_list=(0., 90.), con_list=(0.5,),
radius_list=(10.,), phase_list=(0., 90., 180., 270.), display_dur=0.25,
midgap_dur=0., iteration=1, pregap_dur=2., postgap_dur=3.,
is_smooth_edge=False, smooth_width_ratio=0.2, smooth_func=blur_cos,
is_blank_block=True):
"""
Initialize `StaticGratingCircle` stimulus object, inherits Parameters
from `Stim` class
"""
super(StaticGratingCircle, self).__init__(monitor=monitor,
indicator=indicator,
background=background,
coordinate=coordinate,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'StaticGratingCircle'
if len(center) != 2:
raise ValueError("StaticGragingCircle: input 'center' should have "
"two elements: (altitude, azimuth).")
self.center = center
self.sf_list = list(set(sf_list))
self.phase_list = list(set([p % 360. for p in phase_list]))
self.ori_list = list(set([o % 180. for o in ori_list]))
self.con_list = list(set(con_list))
self.radius_list = list(set(radius_list))
self.is_smooth_edge = is_smooth_edge
self.smooth_width_ratio = smooth_width_ratio
self.smooth_func = smooth_func
if display_dur > 0.:
self.display_dur = float(display_dur)
else:
            raise ValueError('display_dur should be larger than 0 second.')
if midgap_dur >= 0.:
self.midgap_dur = float(midgap_dur)
else:
raise ValueError('midgap_dur should be no less than 0 second')
self.iteration = iteration
self.frame_config = ('is_display', 'spatial frequency (cycle/deg)',
'phase (deg)', 'orientation (deg)',
'contrast [0., 1.]', 'radius (deg)', 'indicator_color [-1., 1.]')
self.is_blank_block = bool(is_blank_block)
@property
def midgap_frame_num(self):
return int(self.midgap_dur * self.monitor.refresh_rate)
@property
def display_frame_num(self):
return int(self.display_dur * self.monitor.refresh_rate)
@staticmethod
def _get_dire(ori):
return (ori + 90.) % 180.
def _generate_circle_mask_dict(self):
"""
        generate a dictionary of circle masks, one for each radius in self.radius_list
"""
masks = {}
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
raise ValueError('Do not understand coordinate system: {}. '
'Should be either "linear" or "degree".'.
format(self.coordinate))
for radius in self.radius_list:
curr_mask = get_circle_mask(map_alt=coord_alt, map_azi=coord_azi,
center=self.center, radius=radius,
is_smooth_edge=self.is_smooth_edge,
blur_ratio=self.smooth_width_ratio,
blur_func=self.smooth_func)
masks.update({radius: curr_mask})
return masks
def _generate_all_conditions(self):
"""
generate all possible conditions for one iteration given the lists of
parameters
Returns
-------
all_conditions : list of tuples
all unique combinations of spatial frequency, phase,
orientation, contrast, and radius. Output depends on initialization
parameters.
"""
all_conditions = [(sf, ph, ori, con, radius) for sf in self.sf_list
for ph in self.phase_list
for ori in self.ori_list
for con in self.con_list
for radius in self.radius_list]
if self.is_blank_block:
all_conditions.append((0., 0., 0., 0., 0.))
return all_conditions
def _generate_frames_for_index_display(self):
"""
generate a tuple of unique frames, each element of the tuple
represents a unique display condition including gap
frame structure:
0. is_display: if gap --> 0; if display --> 1
1. spatial frequency, cyc/deg
2. phase, deg
3. orientation, deg
4. contrast, [0., 1.]
5. radius, deg
6. indicator color, [-1., 1.]
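        for example, a display frame with the default parameters might look like
        (1, 0.08, 90., 0., 0.5, 10., 1.), and the gap frame is
        (0., None, None, None, None, None, -1.)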
"""
all_conditions = self._generate_all_conditions()
gap_frame = (0., None, None, None, None, None, -1.)
frames_unique = [gap_frame]
for condition in all_conditions:
frames_unique.append((1, condition[0], condition[1], condition[2],
condition[3], condition[4], 1.))
frames_unique.append((1, condition[0], condition[1], condition[2],
condition[3], condition[4], 0.))
return frames_unique
def _generate_display_index(self):
if self.indicator.is_sync:
display_frame_num = int(self.display_dur * self.monitor.refresh_rate)
if display_frame_num < 2:
raise ValueError('StaticGratingCircle: display_dur too short, should be '
'at least 2 display frames.')
indicator_on_frame_num = display_frame_num // 2
indicator_off_frame_num = display_frame_num - indicator_on_frame_num
frames_unique = self._generate_frames_for_index_display()
            if len(frames_unique) % 2 != 1:
                raise ValueError('StaticGratingCircle: the number of unique frames should be odd.')
            condition_num = (len(frames_unique) - 1) // 2
index_to_display = [0] * self.pregap_frame_num
for iter in range(self.iteration):
                display_sequence = list(range(condition_num))
random.shuffle(display_sequence)
for cond_ind in display_sequence:
index_to_display += [0] * self.midgap_frame_num
index_to_display += [cond_ind * 2 + 1] * indicator_on_frame_num
index_to_display += [cond_ind * 2 + 2] * indicator_off_frame_num
index_to_display += [0] * self.postgap_frame_num
# remove the extra mid gap
index_to_display = index_to_display[self.midgap_frame_num:]
return frames_unique, index_to_display
else:
            raise NotImplementedError("method not available for non-sync indicator.")
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
self.frames_unique, self.index_to_display = self._generate_display_index()
# print '\n'.join([str(f) for f in self.frames_unique])
mask_dict = self._generate_circle_mask_dict()
num_unique_frames = len(self.frames_unique)
        num_pixels_height = self.monitor.deg_coord_x.shape[0]
        num_pixels_width = self.monitor.deg_coord_x.shape[1]
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
        mov = self.background * np.ones((num_unique_frames,
                                         num_pixels_height,
                                         num_pixels_width),
                                        dtype=np.float32)
        background_frame = self.background * np.ones((num_pixels_height,
                                                      num_pixels_width),
                                                     dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1 and frame[1] != 0: # not a gap and not a blank grating
# curr_ori = self._get_ori(frame[3])
curr_grating = get_grating(alt_map=coord_alt,
azi_map=coord_azi,
dire=self._get_dire(frame[3]),
spatial_freq=frame[1],
center=self.center,
phase=frame[2],
contrast=frame[4])
curr_grating = curr_grating * 2. - 1.
curr_circle_mask = mask_dict[frame[5]]
mov[i] = ((curr_grating * curr_circle_mask) +
(background_frame * (curr_circle_mask * -1. + 1.)))
# add sync square for photodiode
mov[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[-1]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
self_dict = dict(self.__dict__)
self_dict.pop('monitor')
self_dict.pop('indicator')
self_dict.pop('smooth_func')
log = {'stimulation': self_dict,
'monitor': mondict,
'indicator': indicator_dict}
return mov, log
class StaticImages(Stim):
"""
Generate static images stimulus
Stimulus routine presents a sequence of static images in a random order.
Currently the input image stack should be a tif file. The size of the
image should be exactly same as the pixel dimension of downsized monitor
pixel resolution. For example if self.monitor.resolution = (1200,1920)
and self.monitor.downsample_rate = 10. The shape of input image stack
should be n x 120 x 192. Value of the input image stack should be within
the range of [-1., 1.]. The values out of this range will be handled
    by the psychopy.visual.ImageStim() function. The reason for this seemingly
    stringent requirement is that, for visual physiology experiments, the
    parameters of visual stimuli should be very well controlled. Any image
    cropping, zooming, transforming etc. will affect luminance, contrast,
    spatial resolution etc. and produce unexpected effects.
    This stimulus routine provides a method to generate such image stacks:
    StaticImages.wrap_images() takes a stack of images, transforms it into a
    spherically corrected and luminance-normalized image stack in visual
    degree coordinates, and saves the result as an HDF5 file for display.
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
    img_center : 2-tuple of floats, optional
        coordinates for the center of the images (altitude, azimuth)
    deg_per_pixel : float, or list/tuple of two floats, optional
        pixel size in visual degrees of the unwrapped image (altitude, azimuth);
        if float, the sizes in altitude and azimuth are assumed to be the same
display_dur : float, optional
duration of each condition in seconds, defaults to `0.25`
midgap_dur : float, optional
duration of gap between conditions, defaults to `0.`
iteration : int, optional
number of times the stimulus is displayed, defaults to `1`
is_blank_block : bool, optional
if True, a full screen background will be displayed as an additional image.
index of this image will be -1.
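    Examples
    --------
    A minimal usage sketch (assuming the module is imported as `stim`, `mon` and
    `ind` are configured Monitor and Indicator objects, and the hdf5 path is
    hypothetical):
    >>> si = stim.StaticImages(mon, ind, display_dur=0.25, iteration=2)
    >>> si.set_imgs_from_hdf5('path/to/wrapped_images_for_display.hdf5')
    >>> mov, log = si.generate_movie_by_index()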
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
img_center=(0., 60.), deg_per_pixel=(0.1, 0.1), display_dur=0.25,
midgap_dur=0., iteration=1, pregap_dur=2., postgap_dur=3., is_blank_block=True):
"""
Initialize `StaticImages` stimulus object, inherits Parameters from `Stim` class
"""
super(StaticImages, self).__init__(monitor=monitor, indicator=indicator,
background=background, coordinate=coordinate,
pregap_dur=pregap_dur, postgap_dur=postgap_dur)
if len(img_center) != 2:
raise ValueError("StaticImages: input 'img_center' should have "
"two elements: (altitude, azimuth).")
self.stim_name = 'StaticImages'
self.img_center = img_center
self.frame_config = ('is_display', 'image_index', 'indicator color [-1., 1.]')
try:
self.deg_per_pixel_alt = float(deg_per_pixel[0])
self.deg_per_pixel_azi = float(deg_per_pixel[1])
except TypeError:
self.deg_per_pixel_alt = self.deg_per_pixel_azi = float(deg_per_pixel)
self.display_dur = float(display_dur)
self.midgap_dur = float(midgap_dur)
self.iteration = int(iteration)
self.is_blank_block = bool(is_blank_block)
@property
def display_frame_num(self):
return int(self.display_dur * self.monitor.refresh_rate)
@property
def midgap_frame_num(self):
return int(self.midgap_dur * self.monitor.refresh_rate)
def wrap_images(self, work_dir):
"""
        look for 'images_original.tif' in the work_dir, load the images, warp
        and luminance-correct them, and save the wrapping results in an HDF5
        file named "wrapped_images_for_display.hdf5" in the work_dir
datasets
--------
images_wrapped : 3d array, frame x altitude x azimuth,
            each frame has the same shape as the pixel resolution of the
            downsampled self.monitor
attrs
+++++
altitude : 2d array, altitude x azimuth
altitude coordinates of wrapped images in visual degrees,
same shape as each frame of images_wrapped
azimuth : 2d array, altitude x azimuth
azimuth coordinates of wrapped images in visual degrees,
same shape as each frame of images_wrapped
        images_dewrapped : 3d array, frame x altitude x azimuth
            dewrapped images. Note that there is no pixel-to-pixel relationship
            between images_wrapped and images_dewrapped: different regions in
            images_dewrapped are sampled with different density to generate
            images_wrapped, and some pixels in images_dewrapped (especially on
            the edge) may not be presented in images_wrapped. images_dewrapped
            represents the minimum rectangular region of the original image that
            covers the entire images_wrapped.
attrs
+++++
altitude : 2d array, altitude x azimuth
altitude coordinates of dewrapped images in visual degrees,
same shape as each frame in images_dewrapped
azimuth : 2d array, altitude x azimuth
azimuth coordinates of dewrapped images in visual degrees,
same shape as each frame in images_dewrapped
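        examples
        --------
        a minimal sketch of the expected workflow (the folder path is
        hypothetical and should contain 'images_original.tif'):
        >>> si = StaticImages(mon, ind, deg_per_pixel=0.1)
        >>> si.wrap_images('path/to/image_folder')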
"""
if os.path.isfile(os.path.join(work_dir, 'wrapped_images_for_display.hdf5')):
raise IOError('"wrapped_images_for_display.hdf5" already exists in the '
'"work_dir" : {}. Please choose another folder or delete '
'the file.'.format(os.path.realpath(work_dir)))
imgs = tf.imread(os.path.join(work_dir, 'images_original.tif'))
deg_per_pixel = [self.deg_per_pixel_alt, self.deg_per_pixel_azi]
wrapping_results = self.monitor.warp_images(imgs=imgs, center_coor=self.img_center,
deg_per_pixel=deg_per_pixel,
is_luminance_correction=True)
imgs_w, alt_w, azi_w, imgs_dw, alt_dw, azi_dw = wrapping_results
results_f = h5py.File(os.path.join(work_dir, 'wrapped_images_for_display.hdf5'))
grp_w = results_f.create_group('images_wrapped')
_ = grp_w.create_dataset('images', data=imgs_w)
_ = grp_w.create_dataset('altitude', data=alt_w)
_ = grp_w.create_dataset('azimuth', data=azi_w)
grp_dw = results_f.create_group('images_dewrapped')
_ = grp_dw.create_dataset('images', data=imgs_dw)
_ = grp_dw.create_dataset('altitude', data=alt_dw.astype(np.float32))
_ = grp_dw.create_dataset('azimuth', data=azi_dw.astype(np.float32))
results_f.close()
def set_imgs_from_tif(self, imgs_path_wrapped, imgs_path_dewrapped=None):
imgs_wrapped = tf.imread(imgs_path_wrapped)
if len(imgs_wrapped.shape) != 3:
raise ValueError('StaticImages: the input wrapped images should be a 3d array.')
if (imgs_wrapped.shape[1], imgs_wrapped.shape[2]) != self.monitor.deg_coord_x.shape:
            raise ValueError('StaticImages: the input wrapped images should have '
                             'the same dimensions as the pixel resolution of the '
                             'downsampled monitor.')
self.images_wrapped = imgs_wrapped
if imgs_path_dewrapped is not None:
imgs_dewrapped = tf.imread(imgs_path_dewrapped)
if imgs_dewrapped.shape[0] != imgs_wrapped.shape[0]:
                print ('The input dewrapped images have a different number of frames '
                       'from the input wrapped images. Set self.images_dewrapped to None.')
self.images_dewrapped = None
else:
self.images_dewrapped = tf.imread(imgs_path_dewrapped)
else:
self.images_dewrapped = None
def set_imgs_from_hdf5(self, imgs_file_path):
"""
        set 3d arrays from an hdf5 file for display. Ideally the hdf5 file should be
        the result from the self.wrap_images() method. Only designed to work with
        wrapped images.
parameters
----------
imgs_file_path : str
            system path to the hdf5 file. It should have at least one dataset named
'images_wrapped' containing a 3d array of wrapped images to display
"""
img_f = h5py.File(imgs_file_path, 'r')
if len(img_f['images_wrapped/images'].shape) != 3:
raise ValueError('StaticImages: the input wrapped images should be a 3d array.')
if (img_f['images_wrapped/images'].shape[1],
img_f['images_wrapped/images'].shape[2]) != self.monitor.deg_coord_x.shape:
            raise ValueError('StaticImages: the input wrapped images should have '
                             'the same dimensions as the pixel resolution of the '
                             'downsampled monitor.')
try:
alt_w = img_f['images_wrapped/altitude'].value
except:
alt_w = None
try:
azi_w = img_f['images_wrapped/azimuth'].value
except:
azi_w = None
if alt_w is not None:
if not np.array_equal(alt_w, self.monitor.deg_coord_y):
raise ValueError('the altitude coordinates of input wrapped images do not '
'match the wrapped monitor pixel altitude coordinates.')
if azi_w is not None:
if not np.array_equal(azi_w, self.monitor.deg_coord_x):
raise ValueError('the azimuth coordinates of input wrapped images do not '
'match the wrapped monitor pixel azimuth coordinates.')
self.images_wrapped = img_f['images_wrapped/images'].value
if 'images_dewrapped' in img_f:
            if len(img_f['images_dewrapped/images'].shape) != 3:
                print ('The images_dewrapped dataset in the input file is not 3d. '
                       'Set self.images_dewrapped to None.')
self.images_dewrapped = None
self.altitude_dewrapped = None
self.azimuth_dewrapped = None
elif img_f['images_dewrapped/images'].shape[0] != self.images_wrapped.shape[0]:
                print ('The number of frames of images_dewrapped in the input file is different '
                       'from the number of frames of self.images_wrapped. Set self.images_dewrapped to None.')
self.images_dewrapped = None
self.altitude_dewrapped = None
self.azimuth_dewrapped = None
else:
self.images_dewrapped = img_f['images_dewrapped/images'].value
try:
alt_d = img_f['images_dewrapped/altitude'].value
if alt_d.shape[0] != self.images_dewrapped.shape[1] or \
alt_d.shape[1] != self.images_dewrapped.shape[2]:
                        print ('altitude coordinates of images_dewrapped in the input file have '
                               'a different shape from the frames in self.images_dewrapped. Set '
                               'self.altitude_dewrapped to None.')
self.altitude_dewrapped = None
else:
self.altitude_dewrapped = alt_d
except:
self.altitude_dewrapped = None
try:
azi_d = img_f['images_dewrapped/azimuth'].value
if azi_d.shape[0] != self.images_dewrapped.shape[1] or \
azi_d.shape[1] != self.images_dewrapped.shape[2]:
                        print ('azimuth coordinates of images_dewrapped in the input file have '
                               'a different shape from the frames in self.images_dewrapped. Set '
                               'self.azimuth_dewrapped to None.')
self.azimuth_dewrapped = None
else:
self.azimuth_dewrapped = azi_d
except:
self.azimuth_dewrapped = None
else:
print ('Cannot find "images_dewrapped" dataset in the input file. '
'Set self.images_dewrapped to None.')
self.images_dewrapped = None
self.altitude_dewrapped = None
self.azimuth_dewrapped = None
img_f.close()
def _generate_frames_for_index_display(self):
"""
generate a tuple of unique frames, each element of the tuple
represents a unique display condition including gap
frame structure:
0. is_display: if gap --> 0; if display --> 1
1. image index, non-negative integer
2. indicator color, [-1., 1.]
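        for example, (1, 3, 1.) displays image index 3 with the indicator on,
        and the gap frame is (0, None, -1.)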
"""
if not hasattr(self, 'images_wrapped'):
            raise LookupError('StaticImages: cannot find attribute: "images_wrapped". '
'Please use self.set_imgs_from_tif() or '
'self.set_imgs_from_hdf5() to set the images.')
img_num = self.images_wrapped.shape[0]
frames_unique = [(0, None, -1.)]
for i in range(img_num):
frames_unique.append((1, i, 1.))
frames_unique.append((1, i, 0.))
# adding blank image
if self.is_blank_block:
frames_unique.append((1, -1, 1.))
frames_unique.append((1, -1, 0.))
return frames_unique
def _generate_display_index(self):
if self.indicator.is_sync:
display_frame_num = int(self.display_dur * self.monitor.refresh_rate)
if display_frame_num < 2:
                raise ValueError('StaticImages: display_dur too short, should be '
                                 'at least 2 display frames.')
indicator_on_frame_num = display_frame_num // 2
indicator_off_frame_num = display_frame_num - indicator_on_frame_num
frames_unique = self._generate_frames_for_index_display()
            if len(frames_unique) % 2 != 1:
                raise ValueError('StaticImages: the number of unique frames should be odd.')
            img_num = (len(frames_unique) - 1) // 2
index_to_display = [0] * self.pregap_frame_num
for iter in range(self.iteration):
                display_sequence = list(range(img_num))
random.shuffle(display_sequence)
for cond_ind in display_sequence:
index_to_display += [0] * self.midgap_frame_num
index_to_display += [cond_ind * 2 + 1] * indicator_on_frame_num
index_to_display += [cond_ind * 2 + 2] * indicator_off_frame_num
index_to_display += [0] * self.postgap_frame_num
# remove the extra mid gap
index_to_display = index_to_display[self.midgap_frame_num:]
return frames_unique, index_to_display
else:
            raise NotImplementedError("method not available for non-sync indicator.")
def generate_movie_by_index(self):
""" compute the stimulus movie to be displayed by index. """
self.frames_unique, self.index_to_display = self._generate_display_index()
# print '\n'.join([str(f) for f in self.frames_unique])
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
mov = self.background * np.ones((len(self.frames_unique),
self.images_wrapped.shape[1],
self.images_wrapped.shape[2]),
dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
if frame[0] == 1 and frame[1] != -1: # not a gap and not a blank block
                curr_img = self.images_wrapped[frame[1]].copy()  # copy so the stored images are not modified
                curr_img[np.isnan(curr_img)] = self.background
                mov[i] = curr_img
# add sync square for photodiode
mov[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[-1]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
self_dict = dict(self.__dict__)
self_dict.pop('monitor')
self_dict.pop('indicator')
log = {'stimulation': self_dict,
'monitor': mondict,
'indicator': indicator_dict}
return mov, log
class StimulusSeparator(Stim):
"""
a quick flash of indicator to separate different
visual stimuli when displayed in the same session
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
indicator_on_frame_num : int
number of frames the indicator is white, should be positive.
indicator_off_frame_num : int
number of frames the indicator is black, should be positive.
cycle_num : int
        number of repeats of the indicator flash, should be positive.
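    Examples
    --------
    A minimal usage sketch (assuming the module is imported as `stim` and
    `mon` / `ind` are configured Monitor and Indicator objects):
    >>> ss = stim.StimulusSeparator(mon, ind, indicator_on_frame_num=4,
    ...                             indicator_off_frame_num=4, cycle_num=10)
    >>> mov, log = ss.generate_movie_by_index()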
"""
def __init__(self, monitor, indicator, coordinate='degree', background=0.,
indicator_on_frame_num=4, indicator_off_frame_num=4,
cycle_num=10, pregap_dur=0., postgap_dur=0.):
"""
Initialize `StimulusSeparator` stimulus object, inherits Parameters from `Stim` class
"""
super(StimulusSeparator, self).__init__(monitor=monitor,
indicator=indicator,
background=background,
coordinate=coordinate,
pregap_dur=pregap_dur,
postgap_dur=postgap_dur)
self.stim_name = 'StimulusSeparator'
self.background = float(background)
self.indicator_on_frame_num = int(indicator_on_frame_num)
self.indicator_off_frame_num = int(indicator_off_frame_num)
self.cycle_num = int(cycle_num)
self.frame_config = ('is_display', 'indicator color [-1., 1.]')
def _generate_frames_for_index_display(self):
"""
frame structure is as following
first element: is_display
second element: indicator color
"""
return ((0, -1), (1, 1.), (1, -1.))
def _generate_display_index(self):
if self.indicator.is_sync:
frames_unique = self._generate_frames_for_index_display()
index_to_display = [0] * self.pregap_frame_num
for cycle_ind in range(self.cycle_num):
index_to_display += [1] * self.indicator_on_frame_num
index_to_display += [2] * self.indicator_off_frame_num
index_to_display += [0] * self.postgap_frame_num
return frames_unique, index_to_display
else:
            raise NotImplementedError("method not available for non-sync indicator.")
def generate_movie_by_index(self):
self.frames_unique, self.index_to_display = self._generate_display_index()
if self.coordinate == 'degree':
coord_azi = self.monitor.deg_coord_x
coord_alt = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
coord_azi = self.monitor.lin_coord_x
coord_alt = self.monitor.lin_coord_y
else:
            raise LookupError("`coordinate` not in {'linear','degree'}")
indicator_width_min = (self.indicator.center_width_pixel
- self.indicator.width_pixel / 2)
indicator_width_max = (self.indicator.center_width_pixel
+ self.indicator.width_pixel / 2)
indicator_height_min = (self.indicator.center_height_pixel
- self.indicator.height_pixel / 2)
indicator_height_max = (self.indicator.center_height_pixel
+ self.indicator.height_pixel / 2)
mov = self.background * np.ones((len(self.frames_unique),
coord_azi.shape[0],
coord_azi.shape[1]),
dtype=np.float32)
for i, frame in enumerate(self.frames_unique):
# add sync square for photodiode
mov[i, indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = frame[-1]
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
self_dict = dict(self.__dict__)
self_dict.pop('monitor')
self_dict.pop('indicator')
log = {'stimulation': self_dict,
'monitor': mondict,
'indicator': indicator_dict}
return mov, log
class CombinedStimuli(Stim):
"""
the stimulus class that can combine different stimuli into one session.
example:
>>> import retinotopic_mapping.StimulusRoutines as stim
>>> from retinotopic_mapping.MonitorSetup import Monitor, Indicator
>>> from retinotopic_mapping.DisplayStimulus import DisplaySequence
>>> mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
>>> ind = Indicator(mon)
>>> uc = stim.UniformContrast(mon, ind, duration=10., color=-1.)
>>> ss = stim.StimulusSeparator(mon, ind)
>>> cs = stim.CombinedStimuli(mon, ind)
>>> cs.set_stimuli([ss, uc, ss])
>>> ds = DisplaySequence(log_dir='C:/data')
>>> ds.set_stim(cs)
>>> ds.trigger_display()
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
pregap_dur : float, optional
amount of time (in seconds) before the stimulus is presented, defaults
to `2.`
postgap_dur : float, optional
amount of time (in seconds) after the stimulus is presented, defaults
to `3.`
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
pregap_dur=2., postgap_dur=3.):
super(CombinedStimuli, self).__init__(monitor=monitor, indicator=indicator,
background=background, coordinate=coordinate,
pregap_dur=pregap_dur, postgap_dur=postgap_dur)
self.stim_name = 'CombinedStimuli'
def set_stimuli(self, stimuli, static_images_path=None):
"""
parameters
----------
        stimuli : list of the stimulus objects defined above
        static_images_path : str, optional
            system path to the hdf5 file storing the wrapped images for display. If
            there is a StaticImages stimulus in the stimuli list, the images will be
            loaded from this file and displayed
"""
for stimulus in stimuli:
            if stimulus.stim_name not in ['UniformContrast', 'FlashingCircle', 'SparseNoise',
'LocallySparseNoise', 'DriftingGratingCircle',
'StaticGratingCircle', 'StaticImages', 'StimulusSeparator',
'SinusoidalLuminance']:
raise LookupError('Stimulus type "{}" is not currently supported.'
.format(stimulus.stim_name))
self.stimuli = stimuli
self.static_images_path = static_images_path
def generate_movie_by_index(self):
t0 = time.time()
        print ('\n{:04.1f} min : CombinedStimuli: generating stimuli ...'.format((time.time() - t0) / 60.))
self.frames_unique = []
self.index_to_display = []
self.individual_logs = {}
mov = []
curr_start_frame_ind = 0
for stim_ind, stimulus in enumerate(self.stimuli):
curr_stim_name = stimulus.stim_name
curr_stim_id = ft.int2str(stim_ind, 3) + '_' + curr_stim_name
stimulus.set_monitor(self.monitor)
stimulus.set_indicator(self.indicator)
stimulus.set_pregap_dur(self.pregap_dur)
stimulus.set_postgap_dur(self.postgap_dur)
stimulus.set_background(self.background)
stimulus.set_coordinate(self.coordinate)
# load the images if the stimulus is StaticImages
if curr_stim_name == 'StaticImages':
stimulus.set_imgs_from_hdf5(imgs_file_path=self.static_images_path)
curr_mov, curr_log = stimulus.generate_movie_by_index()
curr_log.pop('monitor')
curr_log.pop('indicator')
self.individual_logs.update({curr_stim_id: curr_log['stimulation']})
curr_frames_unique = [[curr_stim_id] + list(f) for f in curr_log['stimulation']['frames_unique']]
curr_index_to_display = np.array(curr_log['stimulation']['index_to_display'], dtype=np.uint64)
self.frames_unique += curr_frames_unique
self.index_to_display.append(curr_index_to_display + curr_start_frame_ind)
mov.append(curr_mov)
curr_start_frame_ind += len(curr_frames_unique)
print ('{:04.1f} min : stimulus: {:<30}; estimated display duration: {:4.1f} minute(s).'
.format((time.time() - t0) / 60., curr_stim_id,
len(curr_index_to_display) / (60. * self.monitor.refresh_rate)))
self.frames_unique = tuple([tuple(f) for f in self.frames_unique])
self.index_to_display = list(np.concatenate(self.index_to_display, axis=0))
mov = np.concatenate(mov, axis=0)
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
self_dict = dict(self.__dict__)
self_dict.pop('monitor')
self_dict.pop('indicator')
stim_seq = []
for stim_ind, stim in enumerate(self.stimuli):
stim_seq.append(ft.int2str(stim_ind, 3) + '_' + stim.stim_name)
self_dict.pop('stimuli')
self_dict.update({'stimuli_sequence':stim_seq})
log = {'stimulation': self_dict,
'monitor': mondict,
'indicator': indicator_dict}
return mov, log
def clear(self):
super(CombinedStimuli, self).clear()
if hasattr(self, 'stimuli'):
del self.stimuli
if hasattr(self, 'static_images_path'):
del self.static_images_path
class KSstim(Stim):
"""
generate Kalatsky & Stryker stimulus
Kalatsky & Stryker (KS) stimulus routine presents checkerboard gratings
that drift against a fixed `background` color.
Parameters
----------
monitor : monitor object
object storing experimental monitor setup information
indicator : indicator object
object storing photodiode indicator information
background : float, optional
background color of stimulus, takes values in [-1,1]. defaults to
`0.`
coordinate : str, optional
coordinate representation, either 'degree' or 'linear', defaults
to 'degree'
square_size : float, optional
size of flickering square, defaults to `25.`
    square_center : tuple, optional
        coordinate of the center point, defaults to `(0,0)`
flicker_frame : int, optional
number of frames in one flicker, defaults to `10`
sweep_width : float, optional
width of sweeps measured in units cm or degs if coordinate value
is 'linear' or 'degree' respectively. defaults to `20`
step_width : float, optional
width of steps measured in units cm or degs if coordinate value
is 'linear' or 'degree' respectively. defaults to `0.15`
direction : {'B2U','U2B','L2R','R2L'}, optional
the direction of sweep movement, defaults to 'B2U'. 'B2U' means
stim is presented from the bottom to the top of the screen, whereas
'U2B' is from the top to the bottom. 'L2R' is left to right and 'R2L'
is right to left
sweep_frame : int, optional
roughly determines speed of the drifting grating, defaults to `1`
iteration : int, optional
number of times that the stimulus will be repeated, defaults to `1`
pregap_dur : float, optional
number of seconds before stimulus is presented, defaults to `2`
postgap_dur : float, optional
number of seconds after stimulus is presented, defaults to `2`
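    Examples
    --------
    A minimal usage sketch (assuming the module is imported as `stim` and
    `mon` / `ind` are configured Monitor and Indicator objects); a single
    bottom-to-top sweep session:
    >>> ks = stim.KSstim(mon, ind, direction='B2U', sweep_width=20.,
    ...                  step_width=0.15, iteration=1)
    >>> mov, log = ks.generate_movie()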
"""
def __init__(self, monitor, indicator, background=0., coordinate='degree',
square_size=25., square_center=(0, 0), flicker_frame=10,
sweep_width=20., step_width=0.15, direction='B2U', sweep_frame=1,
iteration=1, pregap_dur=2., postgap_dur=3.):
        """
        Initialize Kalatsky & Stryker stimulus object
        """
        super(KSstim, self).__init__(monitor=monitor,
                                     indicator=indicator,
                                     coordinate=coordinate,
                                     background=background,
                                     pregap_dur=pregap_dur,
                                     postgap_dur=postgap_dur)
        self.stim_name = 'KSstim'
self.square_size = square_size
self.square_center = square_center
self.flicker_frame = flicker_frame
self.flicker_freq = self.monitor.refresh_rate / self.flicker_frame
self.sweep_width = sweep_width
self.step_width = step_width
self.direction = direction
self.sweep_frame = sweep_frame
self.iteration = iteration
self.frame_config = ('is_display', 'squarePolarity',
'sweep_index', 'indicator_color')
self.sweep_config = ('orientation', 'sweepStartCoordinate',
'sweepEndCoordinate')
self.sweep_speed = (self.monitor.refresh_rate *
self.step_width / self.sweep_frame)
self.flicker_hz = self.monitor.refresh_rate / self.flicker_frame
self.clear()
def generate_squares(self):
"""
generate checker board squares
"""
if self.coordinate == 'degree':
map_x = self.monitor.deg_coord_x
map_y = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
map_x = self.monitor.lin_coord_x
map_y = self.monitor.lin_coord_y
else:
            raise LookupError('`coordinate` not in {"degree","linear"}')
min_x = map_x.min()
max_x = map_x.max()
min_y = map_y.min()
max_y = map_y.max()
neg_x = np.ceil(abs(((min_x - self.square_center[0]) /
(2 * self.square_size)))) + 1
pos_x = np.ceil(abs(((max_x - self.square_center[0]) /
(2 * self.square_size)))) + 1
        neg_y = np.ceil(abs(((min_y - self.square_center[1]) /
                             (2 * self.square_size)))) + 1
        pos_y = np.ceil(abs(((max_y - self.square_center[1]) /
                             (2 * self.square_size)))) + 1
squareV = np.ones((np.size(map_x, 0),
np.size(map_x, 1)),
dtype=np.float32)
squareV = -1 * squareV
stepV = np.arange(self.square_center[0] - (2 * neg_x + 0.5) * self.square_size,
self.square_center[0] + (2 * pos_x - 0.5) * self.square_size,
self.square_size * 2)
for i in range(len(stepV)):
squareV[np.where(np.logical_and(map_x >= stepV[i],
map_x < (stepV[i] +
self.square_size)))] = 1.0
squareH = np.ones((np.size(map_y, 0),
np.size(map_y, 1)), dtype=np.float32)
squareH = -1 * squareH
stepH = np.arange(self.square_center[1] - (2 * neg_y + 0.5) * self.square_size,
self.square_center[1] + (2 * pos_y - 0.5) * self.square_size,
self.square_size * 2)
for j in range(len(stepH)):
squareH[np.where(np.logical_and(map_y >= stepH[j],
map_y < (stepH[j] +
self.square_size)))] = 1
squares = np.multiply(squareV, squareH)
return squares
def plot_squares(self):
"""
plot checkerboard squares
"""
plt.figure()
plt.imshow(self.squares)
def generate_sweeps(self):
"""
generate full screen sweep sequence
"""
sweep_width = self.sweep_width
step_width = self.step_width
direction = self.direction
if self.coordinate == 'degree':
map_x = self.monitor.deg_coord_x
map_y = self.monitor.deg_coord_y
elif self.coordinate == 'linear':
map_x = self.monitor.lin_coord_x
map_y = self.monitor.lin_coord_y
else:
            raise LookupError('`coordinate` not in {"degree", "linear"}')
min_x = map_x.min()
max_x = map_x.max()
min_y = map_y.min()
max_y = map_y.max()
if direction == "B2U":
step_y = np.arange(min_y - sweep_width,
max_y + step_width, step_width)
elif direction == "U2B":
step_y = np.arange(min_y - sweep_width,
max_y + step_width, step_width)[::-1]
elif direction == "L2R":
step_x = np.arange(min_x - sweep_width,
max_x + step_width, step_width)
elif direction == "R2L":
step_x = np.arange(min_x - sweep_width,
max_x + step_width, step_width)[::-1]
else:
            raise LookupError('`direction` not in {"B2U","U2B","L2R","R2L"}')
sweep_table = []
if 'step_x' in locals():
sweeps = np.zeros((len(step_x),
np.size(map_x, 0),
np.size(map_x, 1)), dtype=np.float32)
for i in range(len(step_x)):
temp = sweeps[i, :, :]
temp[np.where(np.logical_and(map_x >= step_x[i],
map_x < (step_x[i] +
sweep_width)))] = 1.0
sweep_table.append(('V', step_x[i], step_x[i] + sweep_width))
del temp
if 'step_y' in locals():
sweeps = np.zeros((len(step_y),
np.size(map_y, 0),
np.size(map_y, 1)), dtype=np.float32)
for j in range(len(step_y)):
temp = sweeps[j, :, :]
temp[np.where(np.logical_and(map_y >= step_y[j],
map_y < (step_y[j] +
sweep_width)))] = 1.0
sweep_table.append(('H', step_y[j], step_y[j] + sweep_width))
del temp
        return sweeps.astype(bool), sweep_table
def generate_frames(self):
"""
        function to generate all the frames needed for KS stimulation,
        returning the information of all frames as a list of tuples
        Information contained in each frame:
        first element -
             1 during stimulus display and 0 otherwise
        second element -
             square polarity, 1 -> not reversed; -1 -> reversed
        third element -
             sweep index in the sweep table
        fourth element -
             color of indicator
                 synchronized: gap -> -1, sweep on -> 1
                 non-synchronized: alternating between -1 and 1 at a defined frequency
        for gap frames the second and third elements should be 'None'
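        for example, a display frame of a non-reversed sweep with a synchronized
        indicator might look like (1, 1, 25, 1), and a gap frame is
        (0, None, None, -1)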
"""
sweeps, _ = self.generate_sweeps()
sweep_frame = self.sweep_frame
flicker_frame = self.flicker_frame
iteration = self.iteration
sweep_num = np.size(sweeps, 0) # Number of sweeps vertical or horizontal
displayframe_num = sweep_frame * sweep_num # total frame number for 1 iter
# frames for one iteration
iter_frames = []
# add frames for gaps
for i in range(self.pregap_frame_num):
iter_frames.append([0, None, None, -1])
# add frames for display
is_reverse = []
for i in range(displayframe_num):
if (np.floor(i // flicker_frame)) % 2 == 0:
is_reverse = -1
else:
is_reverse = 1
sweep_index = int(np.floor(i // sweep_frame))
            # add synchronized indicator
            if self.indicator.is_sync:
indicator_color = 1
else:
indicator_color = -1
iter_frames.append([1, is_reverse, sweep_index, indicator_color])
# add gap frames at the end
for i in range(self.postgap_frame_num):
iter_frames.append([0, None, None, -1])
full_frames = []
# add frames for multiple iteration
for i in range(int(iteration)):
full_frames += iter_frames
# add non-synchronized indicator
        if not self.indicator.is_sync:
indicator_frame = self.indicator.frame_num
for j in range(np.size(full_frames, 0)):
if np.floor(j // indicator_frame) % 2 == 0:
full_frames[j][3] = 1
else:
full_frames[j][3] = -1
full_frames = [tuple(x) for x in full_frames]
return tuple(full_frames)
def generate_movie(self):
"""
Function to Generate Kalatsky & Stryker visual stimulus frame by frame
"""
self.squares = self.generate_squares()
sweeps, self.sweep_table = self.generate_sweeps()
self.frames = self.generate_frames()
full_seq = np.zeros((len(self.frames),
self.monitor.deg_coord_x.shape[0],
self.monitor.deg_coord_x.shape[1]),
dtype=np.float32)
indicator_width_min = (self.indicator.center_width_pixel -
(self.indicator.width_pixel / 2))
indicator_width_max = (self.indicator.center_width_pixel +
(self.indicator.width_pixel / 2))
indicator_height_min = (self.indicator.center_height_pixel -
(self.indicator.height_pixel / 2))
indicator_height_max = (self.indicator.center_height_pixel +
(self.indicator.height_pixel / 2))
background = np.ones((np.size(self.monitor.deg_coord_x, 0),
np.size(self.monitor.deg_coord_x, 1)),
dtype=np.float32) * self.background
for i in range(len(self.frames)):
curr_frame = self.frames[i]
if curr_frame[0] == 0:
curr_NM_seq = background
else:
currSquare = self.squares * curr_frame[1]
curr_sweep = sweeps[curr_frame[2]]
curr_NM_seq = ((curr_sweep * currSquare) +
((-1 * (curr_sweep - 1)) * background))
curr_NM_seq[indicator_height_min:indicator_height_max,
indicator_width_min:indicator_width_max] = curr_frame[3]
full_seq[i] = curr_NM_seq
            if i in range(0, len(self.frames), len(self.frames) // 10):
print('Generating numpy sequence: ' + str(int(100 * (i + 1) / len(self.frames))) + '%')
mondict = dict(self.monitor.__dict__)
indicator_dict = dict(self.indicator.__dict__)
indicator_dict.pop('monitor')
KSdict = dict(self.__dict__)
KSdict.pop('monitor')
KSdict.pop('indicator')
full_dict = {'stimulation': KSdict,
'monitor': mondict,
'indicator': indicator_dict}
return full_seq, full_dict
def clear(self):
self.sweep_table = None
self.frames = None
        self.squares = None
def set_direction(self, direction):
if direction in ['B2U', 'U2B', 'L2R', 'R2L']:
self.direction = direction
self.clear()
else:
            raise LookupError('`direction` not in {"B2U","U2B","L2R","R2L"}')
def set_sweep_sigma(self, sweepSigma):
self.sweepSigma = sweepSigma
self.clear()
def set_sweep_width(self, sweep_width):
self.sweep_width = sweep_width
self.clear()
class KSstimAllDir(object):
"""
    generate Kalatsky & Stryker stimulation in all four directions continuously
Generalizes the KS stimulus routine so that the drifting gratings can go
in all four directions
Parameters
----------
monitor : monitor object
contains display monitor information
indicator : indicator object
contains indicator information
coordinate : str from {'degree','linear'}, optional
specifies coordinates, defaults to 'degree'
background : float, optional
color of background. Takes values in [-1,1] where -1 is black and 1
is white
square_size : int, optional
size of flickering square, defaults to 25.
square_center : tuple, optional
coordinate of center point of the square, defaults to (0,0)
flicker_frame : int, optional
number of frames per flicker while stimulus is being presented,
defaults to `6`
sweep_width : float, optional
width of sweeps. defaults to `20.`
step_width : float, optional
width of steps. defaults to `0.15`.
sweep_frame : int, optional
roughly determines speed of the drifting grating, defaults to `1`
iteration : int, optional
number of times stimulus will be presented, defaults to `1`
pregap_dur : float, optional
number of seconds before stimulus is presented, defaults to `2.`
postgap_dur : float, optional
number of seconds after stimulus is presented, defaults to `3.`
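    Examples
    --------
    A minimal usage sketch (assuming the module is imported as `stim` and
    `mon` / `ind` are configured Monitor and Indicator objects):
    >>> ks_all = stim.KSstimAllDir(mon, ind, step_width=0.3)
    >>> mov, log = ks_all.generate_movie()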
"""
def __init__(self, monitor, indicator, coordinate='degree', background=0.,
square_size=25, square_center=(0, 0), flicker_frame=6, sweep_width=20.,
step_width=0.15, sweep_frame=1, iteration=1, pregap_dur=2.,
postgap_dur=3.):
"""
Initialize stimulus object
"""
self.stim_name = 'KSstimAllDir'
self.monitor = monitor
self.indicator = indicator
self.background = background
self.coordinate = coordinate
self.square_size = square_size
self.square_center = square_center
self.flicker_frame = flicker_frame
self.sweep_width = sweep_width
self.step_width = step_width
self.sweep_frame = sweep_frame
self.iteration = iteration
self.pregap_dur = pregap_dur
self.postgap_dur = postgap_dur
def generate_movie(self):
"""
Generate stimulus movie frame by frame
"""
KS_stim = KSstim(self.monitor,
self.indicator,
background=self.background,
coordinate=self.coordinate,
direction='B2U',
square_size=self.square_size,
square_center=self.square_center,
flicker_frame=self.flicker_frame,
sweep_width=self.sweep_width,
step_width=self.step_width,
sweep_frame=self.sweep_frame,
iteration=self.iteration,
pregap_dur=self.pregap_dur,
postgap_dur=self.postgap_dur)
mov_B2U, dict_B2U = KS_stim.generate_movie()
KS_stim.set_direction('U2B')
mov_U2B, dict_U2B = KS_stim.generate_movie()
KS_stim.set_direction('L2R')
mov_L2R, dict_L2R = KS_stim.generate_movie()
KS_stim.set_direction('R2L')
mov_R2L, dict_R2L = KS_stim.generate_movie()
mov = np.vstack((mov_B2U, mov_U2B, mov_L2R, mov_R2L))
log = {'monitor': dict_B2U['monitor'],
'indicator': dict_B2U['indicator']}
stimulation = dict(dict_B2U['stimulation'])
stimulation['stim_name'] = 'KSstimAllDir'
stimulation['direction'] = ['B2U', 'U2B', 'L2R', 'R2L']
sweep_table = []
frames = []
sweep_table_B2U = dict_B2U['stimulation']['sweep_table']
frames_B2U = dict_B2U['stimulation']['frames']
sweep_length_B2U = len(sweep_table_B2U)
sweep_table_B2U = [['B2U', x[1], x[2]] for x in sweep_table_B2U]
frames_B2U = [[x[0], x[1], x[2], x[3], 'B2U'] for x in frames_B2U]
sweep_table += sweep_table_B2U
frames += frames_B2U
sweep_table_U2B = dict_U2B['stimulation']['sweep_table']
frames_U2B = dict_U2B['stimulation']['frames']
sweep_length_U2B = len(sweep_table_U2B)
sweep_table_U2B = [['U2B', x[1], x[2]] for x in sweep_table_U2B]
frames_U2B = [[x[0], x[1], x[2], x[3], 'U2B'] for x in frames_U2B]
for frame in frames_U2B:
if frame[2] is not None:
frame[2] += sweep_length_B2U
sweep_table += sweep_table_U2B
frames += frames_U2B
sweep_table_L2R = dict_L2R['stimulation']['sweep_table']
frames_L2R = dict_L2R['stimulation']['frames']
sweep_length_L2R = len(sweep_table_L2R)
sweep_table_L2R = [['L2R', x[1], x[2]] for x in sweep_table_L2R]
frames_L2R = [[x[0], x[1], x[2], x[3], 'L2R'] for x in frames_L2R]
for frame in frames_L2R:
if frame[2] is not None:
frame[2] += sweep_length_B2U + sweep_length_U2B
sweep_table += sweep_table_L2R
frames += frames_L2R
sweep_table_R2L = dict_R2L['stimulation']['sweep_table']
frames_R2L = dict_R2L['stimulation']['frames']
sweep_table_R2L = [['R2L', x[1], x[2]] for x in sweep_table_R2L]
frames_R2L = [[x[0], x[1], x[2], x[3], 'R2L'] for x in frames_R2L]
for frame in frames_R2L:
if frame[2] is not None:
frame[2] += sweep_length_B2U + sweep_length_U2B + sweep_length_L2R
sweep_table += sweep_table_R2L
frames += frames_R2L
stimulation['frames'] = [tuple(x) for x in frames]
stimulation['sweep_table'] = [tuple(x) for x in sweep_table]
log['stimulation'] = stimulation
log['stimulation']['frame_config'] = ('is_display', 'squarePolarity',
'sweep_index', 'indicator_color')
log['stimulation']['sweep_config'] = ('orientation',
'sweepStartCoordinate',
'sweepEndCoordinate')
return mov, log
| gpl-3.0 |
gmorph/MAVProxy | setup.py | 1 | 3043 | from setuptools import setup
import os
version = "1.5.6"
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
package_data = ['modules/mavproxy_map/data/*.jpg',
'modules/mavproxy_map/data/*.png',
'tools/graphs/*.xml',
]
package_data.extend(package_files('MAVProxy/modules/mavproxy_cesium/app'))
setup(name='MAVProxy',
version=version,
zip_safe=True,
description='MAVProxy MAVLink ground station',
long_description='''A MAVLink protocol proxy and ground station. MAVProxy
is oriented towards command line operation, and is suitable for embedding in
small autonomous vehicles or for using on ground control stations. It also
features a number of graphical tools such as a slipmap for satellite mapping
view of the vehicles location, and status console and several useful vehicle
control modules. MAVProxy is extensible via a modules system - see the modules
subdirectory for some example modules. MAVProxy was developed by CanberraUAV
for use in the 2012 Outback Challenge, and includes a module for the
CanberraUAV search and rescue system. See
http://ardupilot.github.io/MAVProxy/ for more information
on how to use MAVProxy.''',
url='https://github.com/ArduPilot/MAVProxy',
author='Andrew Tridgell',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'],
license='GPLv3',
packages=['MAVProxy',
'MAVProxy.modules',
'MAVProxy.modules.mavproxy_map',
'MAVProxy.modules.mavproxy_misseditor',
'MAVProxy.modules.mavproxy_smartcamera',
'MAVProxy.modules.mavproxy_cesium',
'MAVProxy.modules.lib',
'MAVProxy.modules.lib.ANUGA',
'MAVProxy.modules.lib.optparse_gui'],
# note that we do not include all the real dependencies here (like matplotlib etc)
# as that breaks the pip install. It seems that pip is not smart enough to
# use the system versions of these dependencies, so it tries to download and install
# large numbers of modules like numpy etc which may be already installed
install_requires=['pymavlink>=1.1.73',
'pyserial>=3.0'],
scripts=['MAVProxy/mavproxy.py',
'MAVProxy/tools/mavflightview.py',
'MAVProxy/tools/MAVExplorer.py',
'MAVProxy/modules/mavproxy_map/mp_slipmap.py',
'MAVProxy/modules/mavproxy_map/mp_tile.py'],
package_data={'MAVProxy':
package_data}
)
| gpl-3.0 |
google-code-export/nmrglue | doc/_build/html/examples/el/plotting/2d_spectrum/plot_spectrum_pts.py | 10 | 1340 | #! /usr/bin/env python
# Create contour plots of a 2D NMRPipe spectrum
import nmrglue as ng
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
# plot parameters
cmap = matplotlib.cm.Blues_r # contour map (colors to use for contours)
contour_start = 30000 # contour level start value
contour_num = 20 # number of contour levels
contour_factor = 1.20 # scaling factor between contour levels
# calculate contour levels
cl = [contour_start*contour_factor**x for x in xrange(contour_num)]
# read in the data from a NMRPipe file
dic,data = ng.pipe.read("../../common_data/2d_pipe/test.ft2")
# create the figure
fig = plt.figure()
ax = fig.add_subplot(111)
# plot the contours
ax.contour(data,cl,cmap=cmap,extent=(0,data.shape[1]-1,0,data.shape[0]-1))
# add some labels
ax.text(2006,1322,"T49",size=8,color='r')
ax.text(2010,1290,"T11",size=8,color='k')
# plot slices in each direction
xslice = data[1187,:]
ax.plot(xrange(data.shape[1]),xslice/3.e3+1187)
yslice = data[:,1976]
ax.plot(-yslice/3.e3+1976,xrange(data.shape[0]))
# decorate the axes
ax.set_ylabel("15N (points)")
ax.set_xlabel("13C (points)")
ax.set_title("Protein 2D NCa Spectrum")
ax.set_xlim(1900,2200)
ax.set_ylim(750,1400)
# save the figure
fig.savefig("spectrum_pts.png") #change to .pdf, .ps, etc for different formats
| bsd-3-clause |
dmelcaz/backPropagationNN | demo.py | 1 | 1168 | import numpy as np
from BackPropagationNN import NeuralNetwork
from sklearn import datasets
from sklearn import preprocessing
from sklearn import model_selection
from sklearn import metrics
def targetToVector(x):
# Vector
a = np.zeros([len(x),10])
for i in range(0,len(x)):
a[i,x[i]] = 1
return a
if __name__ == '__main__':
# Digits dataset loading
digits = datasets.load_digits()
X = preprocessing.scale(digits.data.astype(float))
y = targetToVector(digits.target)
	# Cross validation
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
# Neural Network initialization
NN = NeuralNetwork(64,60,10, output_act = 'softmax')
NN.fit(X_train,y_train, epochs = 50, learning_rate = .1, learning_rate_decay = .01, verbose = 1)
# NN predictions
y_predicted = NN.predict(X_test)
# Metrics
y_predicted = np.argmax(y_predicted, axis=1).astype(int)
y_test = np.argmax(y_test, axis=1).astype(int)
print("\nClassification report for classifier:\n\n%s\n"
% (metrics.classification_report(y_test, y_predicted)))
print("Confusion matrix:\n\n%s" % metrics.confusion_matrix(y_test, y_predicted))
| mit |
LCAS/zoidbot | vrep_teleop/scripts/Linear_modelling.py | 1 | 11680 | #!/usr/bin/env python
# run the ReplaySavedTrajectory.ttt file on Vrep before running this
import numpy as np
import glob
import errno
import matplotlib.pyplot as plt
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from std_msgs.msg import Float64
from std_msgs.msg import Float64MultiArray
class Trajectory:
trajectory_number = 0
def __init__(self, filename):
self.__class__.trajectory_number = self.__class__.trajectory_number + 1
self.input_file = filename
self.read_count = 0
self.phase = None
self.joint_positions = None
self.box_positions = None
self.box_orientations = None
self.context = None
self.read()
self.basis_centres = None
self.phi = None
self.weights = None
self.linear_basis()
self.weights_func()
def read(self):
with open(self.input_file) as f_in:
content = f_in.readlines()
new_content = [line.strip().split(' ') for line in content]
data_dict = {}
for line_pie in new_content:
feature_name = line_pie.pop(0)
this_v = [float(v) for v in line_pie]
if feature_name not in data_dict:
data_dict[feature_name] = []
data_dict[feature_name].append(this_v)
ts = np.array(data_dict['simTime'])[:, 0]
ts = (ts - ts[0])
self.read_count = len(ts)
self.phase = ts / ts[self.read_count - 1] # scaling the time to [0,1]
self.joint_positions = np.array(data_dict['currentPos'])
self.box_positions = np.array(data_dict['boxPosition'])
self.box_orientations = np.array(data_dict['boxOrientation'])
context_start = np.array([self.box_positions[0, 0], self.box_positions[0, 1], self.box_orientations[0, 1]])
context_end = np.array([self.box_positions[self.read_count - 1, 0], self.box_positions[self.read_count - 1, 1],
self.box_orientations[self.read_count - 1, 1]])
# context contains initial and final x,y,angle
self.context = np.concatenate((context_start, context_end), axis=0)
# [np.savez('/home/akhil/Downloads/data/trajectory' + str(self.__class__.trajectory_number) + '.npz',
# phase=self.phase, joint_positions=self.joint_positions, box_positions=self.box_positions,
# box_orientations=self.box_orientations, context=self.context)
def linear_basis(self):
num_basis_inside = 11
num_basis_outside = 4
num_basis_tot = num_basis_inside + num_basis_outside
basis_centre_gap = 1.0 / (num_basis_inside - 1)
basis_width = 2 * basis_centre_gap ** 2
self.basis_centres = np.linspace(-2*basis_centre_gap, 1+2*basis_centre_gap, num_basis_tot)
self.phi = np.zeros((self.phase.shape[0], num_basis_tot))
for z in range(0, self.phase.shape[0]):
b = np.exp(-(1/basis_width)*(self.phase[z] - self.basis_centres)**2)
sum_bz = np.sum(b)
self.phi[z, :] = b/sum_bz
def weights_func(self):
self.weights = np.zeros((self.basis_centres.shape[0], 14))
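        # ridge regression fit of the basis weights for all 14 joints:
        # W = (Phi^T Phi + lambda * I)^-1 Phi^T Q, where Phi is the
        # (time steps x basis functions) design matrix and Q holds the joint positions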
regularization_factor = 1e-10
i_mat = np.identity(self.basis_centres.shape[0])
self.weights = np.dot(np.linalg.inv(np.dot(self.phi.T, self.phi) + regularization_factor*i_mat),
np.dot(self.phi.T, self.joint_positions))
# function to calculate a trajectory given the weight vectors
def calc_trajectory(weights, steps):
num_basis_tot = weights.shape[0]
num_basis_outside = 4
num_basis_inside = num_basis_tot - num_basis_outside
basis_centre_gap = 1.0 / (num_basis_inside - 1)
basis_width = 2 * basis_centre_gap ** 2
phase = np.linspace(0, 1, steps)
basis_centres = np.linspace(-2 * basis_centre_gap, 1 + 2 * basis_centre_gap, num_basis_tot)
phi = np.zeros((steps, num_basis_tot))
for z in range(0, steps):
b = np.exp(-(1 / basis_width) * (phase[z] - basis_centres) ** 2)
sum_bz = np.sum(b)
phi[z, :] = b / sum_bz
traj = np.dot(phi, weights)
return traj
# function to play a trajectory in vrep given the trajectory joint positions and initial box data
def baxter_play(traj, box_data):
pub1 = rospy.Publisher('/replay/joint_states', JointState, queue_size=10)
rate_value = 200
rate1 = rospy.Rate(rate_value)
pub2 = rospy.Publisher('/stopSim', Float64, queue_size=1)
rate2 = rospy.Rate(160)
prev_pos = traj[0]
for pos in traj:
replay = JointState()
replay.header = Header()
replay.header.stamp = rospy.Time.now()
replay.name = ['ljoint1', 'ljoint2', 'ljoint3', 'ljoint4', 'ljoint5', 'ljoint6', 'ljoint7', 'rjoint1',
'rjoint2', 'rjoint3', 'rjoint4', 'rjoint5', 'rjoint6', 'rjoint7']
replay.position = pos
replay.velocity = (pos - prev_pos) * rate_value
replay.effort = box_data
pub1.publish(replay)
rate1.sleep()
prev_pos = pos
# for vrep to know when a trajectory ends and to keep the joints from moving randomly
for i in range(0, 5):
replay = JointState()
replay.header = Header()
replay.header.stamp = rospy.Time.now()
replay.name = []
replay.position = pos
replay.velocity = (pos-pos) # to send 0s so that the joints don't go haywire
replay.effort = []
pub1.publish(replay)
pub2.publish(data=1)
rate2.sleep()
class ContextResult:
def __init__(self, trajectory_list, context_indices):
self.trajectory_list = trajectory_list
self.trajectories_tot = len(trajectory_list)
# self.plot([4,5,6])
self.context_indices = context_indices
self.context_feature_matrix = None
self.k = self.find_transformation_parameters()
self.final_box_data = None
rospy.Subscriber('boxData', Float64MultiArray, self.replay_box_data)
def find_transformation_parameters(self):
num_contexts = len(self.context_indices)
num_basis = self.trajectory_list[0].basis_centres.shape[0]
self.context_feature_matrix = np.ones((self.trajectories_tot, num_contexts + 1))
concatenated_weights = np.zeros((self.trajectories_tot, num_basis * 14))
for i in range(0, self.trajectories_tot):
# saving the context feature vectors of a trajectory as rows
for p in range(0, num_contexts):
self.context_feature_matrix[i, 1 + p] = self.trajectory_list[i].context[self.context_indices[p]]
# concatenating weights for each joint of a trajectory in a row
for j in range(0, 14):
concatenated_weights[i, num_basis * j:num_basis * (j + 1)] = self.trajectory_list[i].weights[:, j]
regularization_factor = 1e-10
i_mat = np.identity(self.context_feature_matrix.shape[1])
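        # ridge regression from context features to concatenated trajectory weights:
        # K = (C^T C + lambda * I)^-1 C^T W, so the weights for a new context feature
        # vector c are recovered later as np.dot(K.T, c)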
k = np.dot(np.linalg.inv(np.dot(self.context_feature_matrix.T, self.context_feature_matrix)
+ regularization_factor * i_mat),
np.dot(self.context_feature_matrix.T, concatenated_weights))
return k
def plot(self, joints):
for j in joints:
plt.figure(j * 2)
plt.title('left_arm joint' + str(j + 1))
for i in range(0, self.trajectories_tot):
y = self.trajectory_list[i].joint_positions[:, j]
x = self.trajectory_list[i].phase
plt.plot(x, y, label=str(i))
plt.legend()
plt.figure(j * 2 + 1)
plt.title('right_arm joint' + str(j + 1))
for i in range(0, self.trajectories_tot):
y = self.trajectory_list[i].joint_positions[:, j + 7]
x = self.trajectory_list[i].phase
plt.plot(x, y, label=str(i))
plt.legend()
plt.show()
def replay_box_data(self, msg):
# print(msg.data)
self.final_box_data = msg.data
def find_linear_errors(self, num_divisions):
num_contexts = len(self.context_indices)
contexts_min = np.amin(self.context_feature_matrix, axis=0)
contexts_max = np.amax(self.context_feature_matrix, axis=0)
task_linear_features = np.ones((num_divisions, self.context_feature_matrix.shape[1]))
errors = np.zeros((num_divisions, 3))
for p in range(0, num_contexts):
# making task context features from min to max
task_linear_features[:, p + 1] = np.linspace(contexts_min[p + 1], contexts_max[p + 1], num_divisions)
box_data_default = np.concatenate((self.trajectory_list[0].box_positions[0],
self.trajectory_list[0].box_orientations[0]), axis=0)
for i in range(0, num_divisions):
temp_result = np.dot(self.k.T, task_linear_features[i])
resulting_weights = np.reshape(temp_result, (14, 15)).T
traj = calc_trajectory(resulting_weights, 1500)
box_data = box_data_default
# changing the initial box position if the context says so
for p in range(0, num_contexts):
if self.context_indices[p] < 2:
box_data[p] = task_linear_features[i, p+1]
elif self.context_indices[p] == 2:
box_data[4] = task_linear_features[i, p+1]
baxter_play(traj, box_data)
# finding the errors in the final position
for p in range(0, num_contexts):
if self.context_indices[p] == 3 or self.context_indices[p] == 4:
errors[i, self.context_indices[p]-3] = self.final_box_data[self.context_indices[p]-3] \
- task_linear_features[i, p+1]
elif self.context_indices[p] == 5:
errors[i, self.context_indices[p]-3] = self.final_box_data[4] - task_linear_features[i, p+1]
plt.figure(100)
if self.context_indices[0] == 5:
plt.plot((task_linear_features[:, 1] - box_data[4])*180/3.14, errors[:, 2]*180/3.14, 'o')
plt.xlabel('rotation angle(in deg)')
plt.ylabel('error in angle(in deg)')
if self.context_indices[0] == 4:
plt.plot((task_linear_features[:, 1] - box_data[1])*100, errors[:, 1]*100, 'o')
plt.xlabel('change in box position(in cms)')
plt.ylabel('error in box position(in cms)')
if self.context_indices[0] == 3:
            plt.plot((task_linear_features[:, 1] - box_data[0])*100, errors[:, 0]*100, 'o')
plt.xlabel('change in box position(in cms)')
plt.ylabel('error in box position(in cms)')
plt.show()
if __name__ == '__main__':
rospy.init_node('trajectory_replay', anonymous=True)
# Enter the file path of the recorded data
input_files = glob.glob('/home/akhil/SampleData/turnDemoSlave_*.txt')
trajectory_list = []
for name in input_files:
try:
trajectory_list.append(Trajectory(name))
        except IOError as exc:  # raised e.g. when a path matched by the glob is a directory; only that case is ignored below
if exc.errno != errno.EISDIR:
raise
relevant_context_indices = [4]
# the contexts that can be used are initial x,y,angle final x,y,angle in order 0-5
calc = ContextResult(trajectory_list, relevant_context_indices)
num_linear_goal_positions = 5
calc.find_linear_errors(num_linear_goal_positions)
# joints_to_plot = [0, 1, 2, 3, 4, 5, 6]
# calc.plot(joints_to_plot)
print('end')
| mit |
jpautom/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating hyperplane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for the
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
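# the boundary satisfies w[0]*x + w[1]*y + intercept = 0, i.e. y = -w[0]/w[1] * x - intercept/w[1]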
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
marakeby/udl | udl/model.py | 1 | 1310 | __author__ = 'haitham'
import sklearn
class UDLModel(sklearn.base.BaseEstimator):
# self.estimator =None
# self.configs= {}
def __init__(self):
        # a dictionary of all configurable parameters. This dictionary is returned by get_params; see the note in the get_params function
self.configs = {}
        # there is no restriction on the internal implementation of your estimator. Your estimator has to be able to transform input into output; see the predict function
self.estimator = None
def fit(self, x_train, **kwargs):
# fit() is model dependent
raise NotImplementedError
def predict(self, x_test):
assert self.estimator
pred = self.estimator(x_test)
return pred
def transform(self, x_test, y=None, **fit_params):
pred = self.estimator(x_test)
return pred
def fit_transform(self, x, y=None, **fit_params):
self.fit(x)
return self.transform(x)
def get_params(self, deep=True):
# Note: Pylearn2 has a function with the same name, please replace with another name e.g. get_params_list
return self.configs
def set_params(self, **parameters):
for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self
| bsd-2-clause |
gamer13/keras | tests/manual/check_callbacks.py | 82 | 7540 | import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
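    # tile a stack of images of shape (n_imgs, img_h, img_w) into a single
    # (grid[0]*img_h, grid[1]*img_w) array, filled row by row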
n_imgs, img_h, img_w = imgs.shape
    if n_imgs != grid[0] * grid[1]:
        raise ValueError('number of images (%d) does not match grid size %s' % (n_imgs, str(grid)))
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
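        # compile Theano functions that return the activations of selected layers for the
        # single test image, sampling a random subset of feature maps for the conv layers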
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
| mit |
anaandresarroyo/Python-GarminDataAnalyser | database/gui.py | 1 | 6886 | import numpy as np
import pandas as pd
from PyQt5 import QtWidgets, QtCore
def fill_table(df, table, max_rows=50):
# TODO: threading
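    # populate a QTableWidget from a pandas DataFrame (up to max_rows rows), padding
    # numeric cells so column sorting works, and restore the previous row selection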
# read indices of currently selected rows
selected_indexes = table.selectedIndexes()
selected_rows = []
for item in selected_indexes:
selected_rows.append(item.row())
# initialise the GUI table columns
table.clear()
# disable sorting to solve issues with repopulating
table.setSortingEnabled(False)
number_of_rows = min(max_rows, len(df.index))
table.setRowCount(number_of_rows)
table.setColumnCount(len(df.columns))
table.setHorizontalHeaderLabels(df.columns)
# fill the GUI table
for col in range(len(df.columns)):
for row in range(number_of_rows):
data = df.iloc[row, col]
item = QtWidgets.QTableWidgetItem()
if isinstance(data, (float, np.float64)):
# pad the floats so they'll be sorted correctly
formatted_data = '{:.3f}'.format(data).rjust(15)
item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
elif isinstance(data, (int, np.int64)):
# pad the integers so they'll be sorted correctly
formatted_data = '{:d}'.format(data).rjust(15)
item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
else:
formatted_data = str(data)
item.setData(QtCore.Qt.EditRole, formatted_data)
table.setItem(row, col, item)
table.resizeColumnToContents(col)
# enable table sorting by columns
table.setSortingEnabled(True)
# temporarily set MultiSelection
table.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
    # reselect the previously selected rows
# TODO: reselect by filename instead of table row number
for row in selected_rows:
table.selectRow(row)
# revert MultiSelection to ExtendedSelection
table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def populate_combobox(items_list, item_default, combobox_list):
# populate comboboxes
for combobox in combobox_list:
combobox.clear()
for item in items_list:
combobox.addItem(item, 0)
if item_default in items_list:
index = combobox.findText(item_default)
else:
index = 0
combobox.setCurrentIndex(index)
def populate_comboboxes(config, df, numeric_comboboxes, units_comboboxes, trace_comboboxes, comboboxes):
default_value_keys = [x[0] for x in config.items('DEFAULT VALUES')]
# TODO: also include datetime.time values
options = df.select_dtypes(include=['float64', 'int64', 'datetime64[ns]']).columns.values
for key, value in numeric_comboboxes.items():
if key in default_value_keys:
default_value = config['DEFAULT VALUES'][key]
else:
default_value = options[0]
populate_combobox(sorted(np.unique(options)),
default_value,
value)
for key, value in units_comboboxes.items():
options = [x[0] for x in config.items('%s UNIT FACTORS' % key.upper())]
populate_combobox(options,
options[0], # TODO: choose the one with value 1, in case it's not the first
[value])
for key, value in trace_comboboxes.items():
if key in default_value_keys:
default_value = config['DEFAULT VALUES'][key]
else:
default_value = None
populate_combobox([default_value],
default_value,
value)
for key, value in comboboxes.items():
options = list(filter(None, [x.strip() for x in config['GUI OPTIONS'][key].splitlines()]))
if key in default_value_keys:
default_value = config['DEFAULT VALUES'][key]
else:
default_value = options[0]
populate_combobox(options,
default_value,
value)
def populate_dates(column_date_local, df, start_date_edit, end_date_edit):
if column_date_local in df.columns:
start_date_edit.setDate(df[column_date_local].min())
end_date_edit.setDate(df[column_date_local].max())
def read_units(units_comboboxes):
"""Read units from GUI comboboxes."""
units = dict()
for key in units_comboboxes.keys():
units[key] = units_comboboxes[key].currentText()
return units
def list_selection(widget):
selected_options = []
for item in widget.selectedItems():
selected_options.append(item.text())
return selected_options
def populate_list(df, column, widget):
"""Populate the list widget with items from the dataframe column."""
items = np.sort(df[column].unique())
widget.clear()
for row, item in enumerate(items):
widget.addItem(item)
widget.item(row).setSelected(True)
def read_table(table, rows=None):
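    # read the GUI table back into a pandas DataFrame (optionally only the given rows)
    # and coerce datetime and position columns to appropriate dtypes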
# read GUI table size
if rows is None:
rows = []
if not len(rows):
rows = range(table.rowCount())
columns = range(table.columnCount())
# read column names from the GUI table
column_names = []
for column in columns:
column_names.append(table.horizontalHeaderItem(column).text())
# initialise dataframe with certain columns
df = pd.DataFrame(columns=column_names)
# read data from GUI table
for row in rows:
for column_number, column_name in enumerate(column_names):
df.loc[row, column_name] = table.item(row, column_number).data(0)
# TODO: make this formatting more automatic
# format data types
datetime_column_names = ['start time', 'start time local', 'end time', 'end time local', 'timestamp']
for column_name in column_names:
# format dates
if column_name in datetime_column_names:
df[column_name] = pd.to_datetime(df[column_name])
elif 'position' in column_name:
df[column_name] = pd.to_numeric(df[column_name], errors='coerce')
# change strings to numbers
# elif column_name != 'file_name':
# df[column_name] = pd.to_numeric(df[column_name], errors='ignore')
return df
def read_selected_table_rows(table):
selected_rows = []
for item in table.selectedIndexes():
selected_rows.append(item.row())
selected_rows = np.unique(selected_rows)
# read the selected_rows from the table
df = read_table(table, selected_rows)
# return the dataframe
return df
def get_labels_text(labels):
text = dict()
for key, value in labels.items():
text[key] = value.text()
return text
def set_labels_text(labels, text):
for key in labels.keys():
labels[key].setText(text[key]) | mit |
ahoyosid/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
mjvakili/ccppabc | ccppabc/code/archive/knn_richness_hod.py | 1 | 12403 | import numpy as np
import matplotlib.pyplot as plt
from interruptible_pool import InterruptiblePool
import time
plt.switch_backend("Agg")
from halotools.empirical_models import Zheng07
from astropy.table import Table
import corner
from scipy.stats import multivariate_normal
from scipy.spatial import cKDTree
model = Zheng07(threshold = -21.)
print 'Data HOD Parameters ', model.param_dict
N_threads = 10
N_particles = 100
N_iter = 40
eps0 = np.array([1.e34 , 1.e34])#, 1.e34, 1.e34, 1.e34, 1.e34])
def richness(group_id):
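    # count the number of galaxies in each FoF group by summing a dummy column per group id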
gals = Table()
gals['groupid'] = group_id
gals['dummy'] = 1
grouped_table = gals.group_by('groupid')
grp_richness = grouped_table['dummy'].groups.aggregate(np.sum)
return grp_richness
#generate data
model.populate_mock()
group_id = model.mock.compute_fof_group_ids()
data_richness = richness(group_id)
np.savetxt("richness.dat" , data_richness)
nz = np.loadtxt("nz.dat")
covar_nz = np.cov(nz)
avg_nz = np.mean(nz)
data = [avg_nz , data_richness]
print data[0]
print data[1]
#n_mocks = 1000
#n_bins = 12
#print hist_bins
#histograms = np.zeros((n_mocks , n_bins))
#avg_nz = []
#for i in xrange(n_mocks):
# model.populate_mock()
# number density
# avg_nz.append(model.mock.number_density)
# # richness histogram
# group_id = model.mock.compute_fof_group_ids()
# group_richness = richness(group_id)
#
# #print 'Group Richness computation takes ', time.time() - hod_time, ' seconds'
# hist, bin_edge = np.histogram(group_richness, bins=hist_bins)
#
# #bin_mid = 0.5 * (bin_edge[1:] + bin_edge[:-1])
#
# histograms[i,:] = hist
# np.savetxt("group_rich.dat", histograms)
#np.savetxt("nz.dat", avg_nz)
"""data and covariance """
"""
nz = np.loadtxt("nz.dat")
histograms = np.loadtxt("group_rich.dat")
covar_nz = np.cov(nz)
covar_gr = np.cov(histograms)
#print covar_nz
#print np.diag(covar_gr)
snr_gr = 1./np.diag(covar_gr)
avg_nz = np.mean(nz)
avg_gr = np.mean(histograms.T , axis = 0)
data = [avg_nz, avg_gr]
#alpha , logMmin , sigma_logM , logM0 , logM1
"""
data_hod = np.array([11.92 , 0.39 , 12.79 , 1.15 , 13.94])
"""simulator"""
class HODsim(object):
def __init__(self):
self.model = Zheng07(threshold = -21.)
def sum_stat(self, theta_star):
#print theta_star
self.model.param_dict['alpha'] = theta_star[3]
self.model.param_dict['logMmin'] = theta_star[2]
self.model.param_dict['sigma_logM'] = theta_star[1]
self.model.param_dict['logM0'] = theta_star[0]
self.model.param_dict['logM1'] = theta_star[4]
#print self.model.param_dict
#a = time.time()
try:
self.model.populate_mock()
#print "pop time", time.time() - a
#a = time.time()
nz = self.model.mock.number_density
#print "nz time" , time.time() - a
#hist = np.zeros((12))
#a = time.time()
group_id =self. model.mock.compute_fof_group_ids()
#print "fof time" , time.time() - a
#a = time.time()
group_richness = richness(group_id)
#print "rich time" , time.time() - a
#a = time.time()
#hist_temp, bin_edge = np.histogram(group_richness, bins=hist_bins)
#print hist , hist_temp
#hist += hist_temp
#self.model.populate_mock()
return [nz , group_richness]
except ValueError:
return [0 , np.zeros(10000)]
ourmodel = HODsim()
simz = ourmodel.sum_stat
from scipy.stats import ks_2samp
"""distance"""
def distance(d_data, d_model, type = 'group distance'):
if type == 'added distance':
dist_nz = np.abs(d_data[0] - d_model[0])/d_data[0]
dist_xi = np.sum(np.abs(d_data[1] - d_model[1])/d_data[1])
dist = dist_nz + dist_xi
elif type == 'separate distance':
dist_nz = (d_data[0] - d_model[0])**2. / covar_nz
dist_xi = np.sum((d_data[1] - d_model[1])**2. * snr_gr)
dist = np.array([dist_nz , dist_xi])
elif type == 'group distance':
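        # chi-squared-like term for the galaxy number density plus the two-sample
        # Kolmogorov-Smirnov statistic between the richness distributions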
dist_nz = (d_data[0] - d_model[0])**2. / covar_nz
dist_ri = ks_2samp(d_data[1] , d_model[1])[0]
dist = np.array([dist_nz , dist_ri])
return np.atleast_1d(dist)
"""covariance matrix in abc sampler"""
def covariance(theta , w , type = 'weighted'):
if type == 'neutral':
return np.cov(theta)
if type == 'weighted':
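        # weighted sample covariance with the standard bias-correction factor
        # sum(w) / (sum(w)^2 - sum(w^2))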
ww = w.sum() / (w.sum()**2 - (w**2).sum())
mean = np.sum(theta*w[None,:] , axis = 1)/ np.sum(w)
tmm = theta - mean.reshape(theta.shape[0] , 1)
sigma2 = ww * (tmm*w[None,:]).dot(tmm.T)
return sigma2
def knn_cov(x , theta, k):
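    # local covariance estimated from the k nearest particles to x (Euclidean metric),
    # with a tiny diagonal term added for numerical stability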
tree = cKDTree(theta.T)
index = tree.query(x, k, p=2)[1]
knn_x = theta.T[index , :]
sigma2 = np.cov(knn_x.T) + np.diag(1.e-16*np.ones(x.shape[0]))
return sigma2
def transition_kernel(x, sigma):
return multivariate_normal(mean = x , cov = sigma).rvs(1)
"""Prior"""
from scipy.stats import uniform
from scipy.stats import norm
class Prior(object):
def __init__(self, prior_dict):
self.prior_dict = prior_dict.copy()
def prior(self):
priorz = []
for key in self.prior_dict.keys():
prior_key = self.prior_dict[key]
if prior_key['shape'] == 'uniform':
loc = prior_key['min']
scale = prior_key['max'] - prior_key['min']
priorz.append( uniform(loc, scale))
elif prior_key['shape'] == 'gauss':
loc = prior_key['mean']
scale = prior_key['stddev']
priorz.append( norm(loc, scale) )
return priorz
prior_dict = {
'logM0' : {'shape': 'uniform', 'min': 9. , 'max': 15.},
'sigma_logM': {'shape': 'uniform', 'min': 0. , 'max': 1.},
'logMmin': {'shape': 'uniform', 'min': 12.5, 'max': 13.09},
'alpha': {'shape': 'uniform', 'min': .9 , 'max': 1.45},
'logM1' : {'shape': 'uniform', 'min': 13.6 , 'max': 14.25},
}
n_params = len(prior_dict.keys())
prior_obj = Prior(prior_dict)
def prior_sampler():
""" Sample prior distribution and return theta_star
"""
theta_star = np.zeros(n_params)
for i in xrange(n_params):
np.random.seed()
theta_star[i] = prior_obj.prior()[i].rvs(size=1)[0]
return theta_star
def pi_priors(tmp_theta):
    # product of the independent prior densities evaluated at tmp_theta
    p_theta = 1.
    for i in xrange(n_params):
        p_theta *= prior_obj.prior()[i].pdf(tmp_theta[i])
    return p_theta
def weighted_sampling(theta, w):
#w_cdf = w.cumsum()/w.sum() # normalized CDF
#np.random.seed()
#rand1 = np.random.random(1)
#cdf_closest_index = np.argmin( np.abs(w_cdf - rand1) )
#closest_theta = theta[:, cdf_closest_index]
np.random.seed()
index = np.random.choice(range(N_particles), 1, p = w/np.sum(w))[0]
closest_theta = theta[:,index]
return closest_theta
def better_multinorm(theta_stst, theta_before, cov):
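    # density of the Gaussian transition kernel (shared covariance) evaluated at the
    # proposal theta_stst, centered at each particle of the previous population;
    # used in the denominator of the PMC importance weights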
n_par, n_part = theta_before.shape
sig_inv = np.linalg.inv(cov)
x_mu = theta_before.T - theta_stst
nrmliz = 1.0 / np.sqrt( (2.0*np.pi)**n_par * np.linalg.det(cov))
multinorm = nrmliz * np.exp(-0.5 * np.sum( (x_mu.dot(sig_inv[None,:])[:,0,:]) * x_mu, axis=1 ) )
return multinorm
prior_range = []
for key in ['logM0', 'sigma_logM', 'logMmin','alpha','logM1']:
prior_range.append([prior_dict[key]['min'], prior_dict[key]['max']])
plot_range = prior_range
prior_range = np.array(prior_range)
print "prior range is = " , prior_range
def plot_thetas(theta , w , t):
fig = corner.corner(
theta.T, weights = w.flatten() , truths= data_hod,
truth_color="red", plot_datapoints=True, fill_contours=False, levels=[0.68],
color='k', bins=25, smooth= True,
range=plot_range,
labels=[r"$\log M_{0}$", r"$\sigma_{log M}$", r"$\log M_{min}$" , r"$\alpha$" , r"$\log M_{1}$" ]
)
plt.savefig("/home/mj/public_html/knn_hod5_flat_t"+str(t)+".png")
plt.close()
np.savetxt("/home/mj/public_html/knn_hod5_flat_t"+str(t)+".dat" , theta.T)
np.savetxt("/home/mj/public_html/knn_hod5_flat_t"+str(t)+".dat" , w.T)
def initial_pool_sampling(i_particle):
""" Sample theta_star from prior distribution for the initial pool
"""
rho = eps0 + 1.
while np.all(rho < eps0)==False:
theta_star = prior_sampler()
model_theta = simz(theta_star)
rho = distance(data, model_theta)
pool_list = [np.int(i_particle)]
for i_param in xrange(n_params):
pool_list.append(theta_star[i_param])
pool_list.append(1./np.float(N_particles))
for r in rho:
pool_list.append(r)
return np.array(pool_list)
def initial_pool():
args_list = np.arange(N_particles)
"""serial"""
#results = []
#for arg in args_list:
# results.append(initial_pool_sampling(arg))
"""parallel"""
pool = InterruptiblePool(processes = N_threads)
mapfn = pool.map
results = mapfn(initial_pool_sampling, args_list)
pool.close()
pool.terminate()
pool.join()
results = np.array(results).T
theta_t = results[1:n_params+1,:]
w_t = results[n_params+1,:]
w_t = w_t / np.sum(w_t)
rhos = results[n_params+2:,:]
sig_t = covariance(theta_t , w_t)
return theta_t, w_t, rhos, sig_t
def importance_pool_sampling(args):
i_particle = args[0]
theta_t_1 = args[1]
w_t_1 = args[2]
sig_t_1 = args[3]
eps_t = args[4]
rho = eps_t + 1.
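    # rejection loop: draw a particle from the previous pool with probability proportional
    # to its weight, perturb it with a Gaussian kernel built from its k nearest neighbours,
    # redraw until the proposal lies inside the prior range, and accept once every
    # summary-statistic distance falls below eps_t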
while np.all(rho < eps_t)==False:
theta_star = weighted_sampling(theta_t_1, w_t_1)
sigma_star = knn_cov(theta_star , theta_t_1, k = 10)
theta_starstar = transition_kernel(theta_star , sigma_star)
while np.all((prior_range[:,0] < theta_starstar)&(theta_starstar < prior_range[:,1]))==False:
theta_star = weighted_sampling(theta_t_1, w_t_1)
sigma_star = knn_cov(theta_star , theta_t_1, k = 10)
theta_starstar = transition_kernel(theta_star , sigma_star)
model_starstar = simz(theta_starstar)
rho = distance(data, model_starstar)
p_theta = pi_priors(theta_starstar)
w_starstar = p_theta/np.sum( w_t_1 * better_multinorm(theta_starstar, theta_t_1, sig_t_1) )
pool_list = [np.int(i_particle)]
for i_p in xrange(n_params):
pool_list.append(theta_starstar[i_p])
pool_list.append(w_starstar)
for r in rho:
pool_list.append(r)
return pool_list
def pmc_abc(N_threads = N_threads):
# initial pool
theta_t, w_t, rhos, sig_t = initial_pool()
w_t = w_t/np.sum(w_t)
t = 0 # iternation number
plot_thetas(theta_t , w_t, t)
while t < N_iter:
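        # tighten the distance threshold adaptively: a percentile of the previous
        # pool's distances (20th for the first few iterations, 50th afterwards)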
if t < 4 :
eps_t = np.percentile(np.atleast_2d(rhos), 20, axis=1)
else:
eps_t = np.percentile(np.atleast_2d(rhos), 50, axis=1)
print 'New Distance Threshold Eps_t = ', eps_t , "t=" , t
theta_t_1 = theta_t.copy()
w_t_1 = w_t.copy()
sig_t_1 = sig_t.copy()
args_list = [[i, theta_t_1, w_t_1, sig_t_1, eps_t] for i in xrange(N_particles)]
"""serial"""
results = []
#for args in args_list:
# pool_sample = importance_pool_sampling(args)
# results.append( pool_sample )
"""parallel"""
pool = InterruptiblePool(processes = N_threads)
mapfn = pool.map
results = mapfn(importance_pool_sampling, args_list)
pool.close()
pool.terminate()
pool.join()
results = np.array(results).T
theta_t = results[1:n_params+1,:]
w_t = results[n_params+1,:]
w_t = w_t/np.sum(w_t)
rhos = results[n_params+2:,:]
#sig_t = knn_sigma(theta_t , k = 10)
sig_t = 2. * covariance(theta_t , w_t)
t += 1
plot_thetas(theta_t, w_t , t)
pmc_abc()
| mit |
mehdidc/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
3WiseMen/python | 12. CSV2SQL/CSV2SQL.py | 1 | 2697 | import pandas as pd
import os
import time
from datetime import datetime
from matplotlib.dates import date2num
import sqlite3
# Create NEW SQL database
connect = sqlite3.connect('Yahoo_sqlite.db')
cursor = connect.cursor()
PATH = '/Users/insuyu/GitHub/historical.data/yahoo.csv/'
#PATH = '/Users/insuyu/GitHub/python/12. CSV2SQL/'
# Create a New Table for Code
# DateStamp(REAL) | Date(TEXT) | Open(REAL) | High(REAL) | Low(REAL)
# | Close(REAL) | Volume(REAL) | AdjClose(REAL) | MA20(REAL) | MA30(REAL) | MA200(REAL) | MA240 (REAL)
command = "CREATE TABLE YAHOO(Code TEXT, DateStamp INT, Date TEXT, Open REAL, High REAL, Low REAL, Close REAL, Volume INT, AdjClose REAL, MA20 REAL, MA30 REAL, MA200 REAL, MA240 REAL)"
print(command)
cursor.execute(command)
# For All Files in PATH
nFiles = 0
for filename in os.listdir(PATH):
# Find only *.csv files
if filename.endswith(".csv"):
print ("[%d] %s" % (nFiles, filename))
# Load CVS to Database
DB = pd.read_csv(PATH+filename)
# Generate Moving Average Data
DB['MA20'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 20)
DB['MA30'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 30)
#DB['MA30'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 30)
#DB['MA60'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 60)
#DB['MA120'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 120)
DB['MA200'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 200)
DB['MA240'] = pd.stats.moments.rolling_mean(DB['Adj Close'], 240)
DB['DateStamp'] = [ date2num(datetime.strptime(date,"%Y-%m-%d")) for date in DB['Date'] ]
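        # DateStamp holds the matplotlib date number (date2num) of each trading day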
###########################################################################
Code = filename[:-4] # Trading Code
command = "INSERT INTO YAHOO(Code, DateStamp, Date, Open, High, Low, Close, Volume, AdjClose, MA20, MA30, MA200, MA240) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"
#print(command)
for i in range(len(DB)):
DateStamp = DB['DateStamp'][i]
Date = DB['Date'][i]
Open = DB['Open'][i]
High = DB['High'][i]
Low = DB['Low'][i]
Close = DB['Close'][i]
Volume = int(DB['Volume'][i])
AdjClose = DB['Adj Close'][i]
MA20 = DB['MA20'][i]
MA30 = DB['MA30'][i]
MA200 = DB['MA200'][i]
MA240 = DB['MA240'][i]
cursor.execute(command,(Code, DateStamp, Date, Open, High, Low, Close, Volume, AdjClose, MA20, MA30, MA200, MA240) )
connect.commit()
nFiles += 1
| mit |
iismd17/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
akionakamura/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
dhermes/bezier | docs/conf.py | 1 | 11552 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# bezier documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 6 22:34:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import doctest
import os
import sys
import sphinx.domains.c
import sphinx_rtd_theme
import bezier # ``bezier`` must be installed to build the docs.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "3.5.1"
nitpicky = True
nitpick_ignore = [
("py:class", "bezier._base.Base"),
]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx_docstring_typing",
"custom_html_writer",
"doctest_monkeypatch",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = "utf-8-sig"
# The primary toctree document.
master_doc = "index"
# General information about the project.
project = u"bezier"
copyright = u"2016, Danny Hermes"
author = bezier.__author__
version = bezier.__version__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# ``version`` **can be** the short X.Y version.
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ""
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
ON_READ_THE_DOCS = os.environ.get("READTHEDOCS") == "True"
if not ON_READ_THE_DOCS:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u"bezier v0.0.1"
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["images"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a "Last updated on:" timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to "%b %d, %Y".
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# "da", "de", "en", "es", "fi", "fr", "hu", "it", "ja"
# "nl", "no", "pt", "ro", "ru", "sv", "tr", "zh"
#
# html_search_language = "en"
# A dictionary with options for the search language support, empty by default.
# "ja" uses this config value.
# "zh" user can custom change `jieba` dictionary path.
#
# html_search_options = {"type": "default"}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = "scorer.js"
# Output file base name for HTML help builder.
htmlhelp_basename = "bezier-doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"bezier.tex",
u"bezier Documentation",
u"Danny Hermes",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "bezier", u"bezier Documentation", [author], 1)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"bezier",
u"bezier Documentation",
author,
"bezier",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: "footnote", "no", or "inline".
#
# texinfo_show_urls = "footnote"
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"matplotlib": ("https://matplotlib.org/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"python": ("https://docs.python.org/3/", None),
"sympy": ("https://docs.sympy.org/latest/", None),
}
# Autodoc config
autoclass_content = "both"
autodoc_member_order = "bysource"
autodoc_mock_imports = []
# -- Options for sphinx.ext.doctest --------------------------------------
# See:
# (http://www.sphinx-doc.org/en/stable/ext/
# doctest.html#confval-doctest_default_flags)
# Defaults (as of 1.7.1) are:
# - ELLIPSIS
# - IGNORE_EXCEPTION_DETAIL (TURN THIS ONE BACK ON)
# - DONT_ACCEPT_TRUE_FOR_1
# We want to turn off
# (https://docs.python.org/3/library/\
# doctest.html#doctest.IGNORE_EXCEPTION_DETAIL)
doctest_default_flags = doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS
| apache-2.0 |
quevedin/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
    print('thinkstats2 SpearmanCorr log wtkg2',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
    print('thinkstats2 Corr log wtkg2',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
    return  # early exit: the figure generation below is skipped; remove this line to run it
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/semi_supervised/label_propagation.py | 17 | 15941 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
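For illustration, a short sketch of how the kernel and the clamping behavior
are chosen at construction time (using the classes and parameter names
defined later in this module):
    hard_clamp = LabelPropagation(kernel='knn', n_neighbors=7)     # alpha=1, hard clamp
    soft_clamp = LabelSpreading(kernel='rbf', gamma=20, alpha=0.2)  # soft clamp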
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
krez13/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
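    As a small worked example: for the two-node path graph with adjacency
    matrix [[0, 1], [1, 0]] the unnormalized Laplacian is [[1, -1], [-1, 1]],
    i.e. the node degrees on the diagonal and the negated adjacency off the
    diagonal.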
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/examples/ex_generic_mle_t.py | 29 | 10826 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import stats, special
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
class MyT(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation of a linear model with t-distributed errors
    This is an example for generic MLE; the mean is a linear function of the
    regressors and the errors follow a t distribution with estimated degrees
    of freedom and scale.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
        Negative loglikelihood of the t model, per observation
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
        The negative log likelihood of each observation, evaluated at `params`
Notes
--------
        .. math :: -\\ln L=\\sum_{i=1}^{n}\\left[-\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)+\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)+\\frac{1}{2}\\ln(\\nu\\pi)+\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{(y_{i}-x_{i}^{\\prime}\\beta)^{2}}{\\nu\\sigma^{2}}\\right)+\\ln\\sigma\\right]
        where :math:`\\nu` is the degrees of freedom and :math:`\\sigma` the scale.
"""
#print len(params),
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
#Example:
np.random.seed(98765678)
nobs = 1000
rvs = np.random.randn(nobs,5)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(5, size=nobs)
#print data_endog
modp = MyT(data_endog, data_exog)
modp.start_value = np.ones(data_exog.shape[1]+2)
modp.start_value[-2] = 10
modp.start_params = modp.start_value
resp = modp.fit(start_params = modp.start_value)
print(resp.params)
print(resp.bse)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_value, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_value)
print(tmp.shape)
'''
>>> tmp = modp.loglike(modp.start_value)
8
>>> tmp.shape
(100,)
>>> tmp.sum(0)
-24220.877108016182
>>> tmp = modp.nloglikeobs(modp.start_value)
8
>>> tmp.shape
(100, 100)
>>> np.dot(modp.exog, beta).shape
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'beta' is not defined
>>> params = modp.start_value
>>> beta = params[:-2]
>>> beta.shape
(6,)
>>> np.dot(modp.exog, beta).shape
(100,)
>>> modp.endog.shape
(100, 100)
>>> xbeta.shape
(100,)
>>>
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
repr(start_params) array([ 1., 1., 1., 1., 1., 1., 1., 1.])
Optimization terminated successfully.
Current function value: 91.897859
Iterations: 108
Function evaluations: 173
Gradient evaluations: 173
[ 1.58253308e-01 1.73188603e-01 1.77357447e-01 2.06707494e-02
-1.31174789e-01 8.79915580e-01 6.47663840e+03 6.73457641e+02]
[ NaN NaN NaN NaN NaN
28.26906182 NaN NaN]
()
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>> resp.bse
array([ NaN, NaN, NaN, NaN,
NaN, 28.26906182, NaN, NaN])
>>> resp.jac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'GenericLikelihoodModelResults' object has no attribute 'jac'
>>> resp.bsejac
array([ 45243.35919908, 51997.80776897, 41418.33021984,
42763.46575168, 50101.91631612, 42804.92083525,
3005625.35649203, 13826948.68708931])
>>> resp.bsejhj
array([ 1.51643931, 0.80229636, 0.27720185, 0.4711138 , 0.9028682 ,
0.31673747, 0.00524426, 0.69729368])
>>> resp.covjac
array([[ 2.04696155e+09, 1.46643494e+08, 7.59932781e+06,
-2.39993397e+08, 5.62644255e+08, 2.34300598e+08,
-3.07824799e+09, -1.93425470e+10],
[ 1.46643494e+08, 2.70377201e+09, 1.06005712e+08,
3.76824011e+08, -1.21778986e+08, 5.38612723e+08,
-2.12575784e+10, -1.69503271e+11],
[ 7.59932781e+06, 1.06005712e+08, 1.71547808e+09,
-5.94451158e+07, -1.44586401e+08, -5.41830441e+06,
1.25899515e+10, 1.06372065e+11],
[ -2.39993397e+08, 3.76824011e+08, -5.94451158e+07,
1.82871400e+09, -5.66930891e+08, 3.75061111e+08,
-6.84681772e+09, -7.29993789e+10],
[ 5.62644255e+08, -1.21778986e+08, -1.44586401e+08,
-5.66930891e+08, 2.51020202e+09, -4.67886982e+08,
1.78890380e+10, 1.75428694e+11],
[ 2.34300598e+08, 5.38612723e+08, -5.41830441e+06,
3.75061111e+08, -4.67886982e+08, 1.83226125e+09,
-1.27484996e+10, -1.12550321e+11],
[ -3.07824799e+09, -2.12575784e+10, 1.25899515e+10,
-6.84681772e+09, 1.78890380e+10, -1.27484996e+10,
9.03378378e+12, 2.15188047e+13],
[ -1.93425470e+10, -1.69503271e+11, 1.06372065e+11,
-7.29993789e+10, 1.75428694e+11, -1.12550321e+11,
2.15188047e+13, 1.91184510e+14]])
>>> hb
array([[ 33.68732564, -2.33209221, -13.51255321, -1.60840159,
-13.03920385, -9.3506543 , 4.86239173, -9.30409101],
[ -2.33209221, 3.12512611, -6.08530968, -6.79232244,
3.66804898, 1.26497071, 5.10113409, -2.53482995],
[ -13.51255321, -6.08530968, 31.14883498, -5.01514705,
-10.48819911, -2.62533035, 3.82241581, -12.51046342],
[ -1.60840159, -6.79232244, -5.01514705, 28.40141917,
-8.72489636, -8.82449456, 5.47584023, -18.20500017],
[ -13.03920385, 3.66804898, -10.48819911, -8.72489636,
9.03650914, 3.65206176, 6.55926726, -1.8233635 ],
[ -9.3506543 , 1.26497071, -2.62533035, -8.82449456,
3.65206176, 21.41825348, -1.28610793, 4.28101146],
[ 4.86239173, 5.10113409, 3.82241581, 5.47584023,
6.55926726, -1.28610793, 46.52354448, -32.23861427],
[ -9.30409101, -2.53482995, -12.51046342, -18.20500017,
-1.8233635 , 4.28101146, -32.23861427, 178.61978279]])
>>> np.linalg.eigh(hb)
(array([ -10.50373649, 0.7460258 , 14.73131793, 29.72453087,
36.24103832, 41.98042979, 48.99815223, 190.04303734]), array([[-0.40303259, 0.10181305, 0.18164206, 0.48201456, 0.03916688,
0.00903695, 0.74620692, 0.05853619],
[-0.3201713 , -0.88444855, -0.19867642, 0.02828812, 0.16733946,
-0.21440765, -0.02927317, 0.01176904],
[-0.41847094, 0.00170161, 0.04973298, 0.43276118, -0.55894304,
0.26454728, -0.49745582, 0.07251685],
[-0.3508729 , -0.08302723, 0.25004884, -0.73495077, -0.38936448,
0.20677082, 0.24464779, 0.11448238],
[-0.62065653, 0.44662675, -0.37388565, -0.19453047, 0.29084735,
-0.34151809, -0.19088978, 0.00342713],
[-0.15119802, -0.01099165, 0.84377273, 0.00554863, 0.37332324,
-0.17917015, -0.30371283, -0.03635211],
[ 0.15813581, 0.0293601 , 0.09882271, 0.03515962, -0.48768565,
-0.81960996, 0.05248464, 0.22533642],
[-0.06118044, -0.00549223, 0.03205047, -0.01782649, -0.21128588,
-0.14391393, 0.05973658, -0.96226835]]))
>>> np.linalg.eigh(np.linalg.inv(hb))
(array([-0.09520422, 0.00526197, 0.02040893, 0.02382062, 0.02759303,
0.03364225, 0.06788259, 1.34043621]), array([[-0.40303259, 0.05853619, 0.74620692, -0.00903695, -0.03916688,
0.48201456, 0.18164206, 0.10181305],
[-0.3201713 , 0.01176904, -0.02927317, 0.21440765, -0.16733946,
0.02828812, -0.19867642, -0.88444855],
[-0.41847094, 0.07251685, -0.49745582, -0.26454728, 0.55894304,
0.43276118, 0.04973298, 0.00170161],
[-0.3508729 , 0.11448238, 0.24464779, -0.20677082, 0.38936448,
-0.73495077, 0.25004884, -0.08302723],
[-0.62065653, 0.00342713, -0.19088978, 0.34151809, -0.29084735,
-0.19453047, -0.37388565, 0.44662675],
[-0.15119802, -0.03635211, -0.30371283, 0.17917015, -0.37332324,
0.00554863, 0.84377273, -0.01099165],
[ 0.15813581, 0.22533642, 0.05248464, 0.81960996, 0.48768565,
0.03515962, 0.09882271, 0.0293601 ],
[-0.06118044, -0.96226835, 0.05973658, 0.14391393, 0.21128588,
-0.01782649, 0.03205047, -0.00549223]]))
>>> np.diag(np.linalg.inv(hb))
array([ 0.01991288, 1.0433882 , 0.00516616, 0.02642799, 0.24732871,
0.05281555, 0.02236704, 0.00643486])
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 0.14111302, 1.02146375, 0.07187597, 0.16256686, 0.49732154,
0.22981633, 0.14955616, 0.08021756])
>>> hess = modp.hessian(resp.params)
>>> np.sqrt(np.diag(np.linalg.inv(hess)))
array([ 231.3823423 , 117.79508218, 31.46595143, 53.44753106,
132.4855704 , NaN, 5.47881705, 90.75332693])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-4)
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 31.93524822, 22.0333515 , NaN, 29.90198792,
38.82615785, NaN, NaN, NaN])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-8)
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>>
'''
| bsd-3-clause |
sanja7s/SR_Twitter | src_general/sentiment_neighborhood_threshold.py | 1 | 6934 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import matplotlib
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import matplotlib.cm as cm
from collections import defaultdict
from igraph import *
font = {'family' : 'monospace',
'variant' : 'normal',
'weight' : 'light',
'size' : 12}
matplotlib.rc('font', **font)
IN_DIR = "../../../DATA/taxonomy_stats/"
f_sent_in = "sentiment/user_sentiment.tab"
f_weighted_edges_in = "sentiment/mention_graph_weights.dat"
def read_in_recip():
f = open(f_sent_in, "r")
G = Graph.Read_Ncol(f_weighted_edges_in,names=True, directed=True, weights=True)
summary(G)
G.to_undirected(mode="mutual", combine_edges=min)
summary(G)
G.simplify(multiple=False, loops=True)
summary(G)
cnt = 0
for line in f:
(vid, vsent, vsentval) = line[:-1].split('\t')
vsentval = float(vsentval)
v = G.vs.select(name = vid)
v["sent"] = vsentval
cnt += 1
print cnt
to_delete_vertices = [v.index for v in G.vs if v["sent"] == None]
print len(to_delete_vertices)
G.delete_vertices(to_delete_vertices)
summary(G)
return G
#########################################################################
def pairwise_assortativity(G):
sa = []
ne = []
xaxis = []
f = open('sentiment/pairwise_assortativity.tab', 'w')
for threshold in np.arange(1, 100):
s, n = threshold_pairwise_assortativity(G, threshold)
sa.append(s)
ne.append(n)
xaxis.append(threshold)
f.write(str(threshold) + '\t' + str(s) + '\t' + str(n) + '\n')
#plot_SA(xaxis, sa, ne, 'pairwise_assortativity_v4.png')
return xaxis, sa, ne
def threshold_pairwise_assortativity(G, threshold):
print "stats for %d" % threshold
summary(G)
to_delete_edges = [e.index for e in G.es if float(e["weight"]) <= threshold]
G.delete_edges(to_delete_edges)
# just a check
not_connected_nodes = G.vs(_degree_eq=0)
print len(not_connected_nodes)
G.delete_vertices(not_connected_nodes)
summary(G)
#r = G.assortativity(directed=False,types1=G.strength(weights='weight'))
r = G.assortativity("sent", directed=False)
print "Sentmiment value assortativity for threshold %d is %f " % (threshold, r)
N = G.ecount() # - len(not_connected_nodes)
return r, N
def plot_SA(xaxis, sa, ne, img_out_plot):
x = np.array(xaxis)
y = np.array(sa)
y1 = np.log(np.array(ne))
fig, ax1 = plt.subplots()
ax1.plot(x, y, 'cp-')
ax1.set_xlabel('# mention threshold')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('pairwise assortativity', color='c')
for tl in ax1.get_yticklabels():
tl.set_color('c')
ax2 = ax1.twinx()
ax2.plot(x, y1, 'rd-')
ax2.set_ylabel('log(# edges)', color='r')
#ax2.set_yscale("log")
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.grid(True)
plt.title('Sentiment pairwise assortativity')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.savefig('sentiment/' + img_out_plot,format='png',dpi=200)
#########################################################################
def neighborhood_assortativity(G):
sa = []
ne = []
xaxis = []
f = open('sentiment/neighborhood/neighborhood_assortativity.tab', 'w')
for threshold in np.arange(1, 100):
s, n = threshold_neighborhood_assortativity(G, threshold)
sa.append(s)
ne.append(n)
xaxis.append(threshold)
f.write(str(threshold) + '\t' + str(s) + '\t' + str(n) + '\n')
#plot_SA_neighborhood(xaxis, sa, ne, 'neighborhood_assortativity_v2.png')
return xaxis, sa, ne
def threshold_neighborhood_assortativity(G, threshold):
print "stats for %d" % threshold
summary(G)
to_delete_edges = [e.index for e in G.es if float(e["weight"]) <= threshold]
G.delete_edges(to_delete_edges)
# just a check
not_connected_nodes = G.vs(_degree_eq=0)
print len(not_connected_nodes)
G.delete_vertices(not_connected_nodes)
summary(G)
neighborhood_sent = []
self_sent = []
no_neighbors = []
cnt_no_neighbors = 0
for v in G.vs:
nb = G.neighbors(v.index)
NS = G.vs.select(nb)["sent"]
if NS == []:
cnt_no_neighbors += 1
print v.index, nb
no_neighbors.append(v.index)
continue
ns = np.array(NS)
ns_mean = np.average(ns)
self_sent.append(v["sent"])
neighborhood_sent.append(ns_mean)
print cnt_no_neighbors
neighborhood_sent = np.array(neighborhood_sent)
self_sent = np.array(self_sent)
return np.corrcoef(self_sent, neighborhood_sent)[1,0], neighborhood_sent.size
def plot_SA_neighborhood(xaxis, sa, ne, img_out_plot):
x = np.array(xaxis)
y = np.array(sa)
y1 = np.log(np.array(ne))
fig, ax1 = plt.subplots()
ax1.plot(x, y, 'gp-')
ax1.set_xlabel('# mention threshold')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('neighborhood assortativity', color='g')
for tl in ax1.get_yticklabels():
tl.set_color('g')
ax2 = ax1.twinx()
ax2.plot(x, y1, 'md-')
ax2.set_ylabel('log(# edges)', color='m')
#ax2.set_yscale("log", nonposy='clip')
for tl in ax2.get_yticklabels():
tl.set_color('m')
plt.grid(True)
plt.title('Sentiment neighborhood assortativity')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.savefig('sentiment/neighborhood/' + img_out_plot,format='png',dpi=200)
#########################################################################
def plot_SA_both(xaxis, sa, ne, nsa, nne, img_out_plot):
#from matplotlib.font_manager import FontProperties
#fontP = FontProperties()
#fontP.set_size('small')
#legend([plot1], "title", prop = fontP)
x = np.array(xaxis)
y = np.array(sa)
y1 = np.log(np.array(ne))
yn = np.array(nsa)
yn1 = np.log(np.array(nne))
fig, ax1 = plt.subplots()
ax1.plot(x, y, 'gp-', label='pairwise')
ax1.plot(x, yn, 'gd-', label='neighborhood')
ax1.set_xlabel('# mentions threshold')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('sentiment assortativity', color='g')
for tl in ax1.get_yticklabels():
tl.set_color('g')
plt.legend(loc='best',frameon=False)
ax2 = ax1.twinx()
ax2.plot(x, y1, 'mp-', label='pairwise')
ax2.plot(x, yn1, 'md-', label='neighborhood')
ax2.set_ylabel('log(# edges)', color='m')
#ax2.set_yscale("log", nonposy='clip')
for tl in ax2.get_yticklabels():
tl.set_color('m')
plt.legend(loc='best',frameon=False)
plt.grid(True)
#plt.title('Sentiment parwise and neighborhood assortativity')
#plt.legend(bbox_to_anchor=(0, 1), bbox_transform=plt.gcf().transFigure)
plt.savefig('sentiment/both/' + img_out_plot,format='png',dpi=200)
def main_pairwise():
os.chdir(IN_DIR)
G = read_in_recip()
pairwise_assortativity(G)
#main_pairwise()
def main_neighborhood():
os.chdir(IN_DIR)
G = read_in_recip()
neighborhood_assortativity(G)
#main_neighborhood()
def main():
os.chdir(IN_DIR)
G = read_in_recip()
xaxis, sa, ne = pairwise_assortativity(G)
G = read_in_recip()
nxaxis, nsa, nne = neighborhood_assortativity(G)
plot_SA_both(xaxis, sa, ne, nsa, nne, 'sentimen_assortativity_100.png')
main() | mit |
mattgiguere/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
INM-6/hybridLFPy | examples/Hagen_et_al_2016_cercor/figure_09.py | 2 | 8384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from figure_10 import fig_exc_inh_contrib
from plot_methods import plotPowers, plot_population, plot_signal_sum_colorplot, plot_signal_sum
from cellsim16popsParams_modified_spontan import multicompartment_params
import analysis_params
import plotting_helpers as phlp
import os
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.style
matplotlib.style.use('classic')
######################################
### OUTSIDE SCOPE DEFINITIONS ###
######################################
######################################
### IMPORT PANELS ###
######################################
######################################
### FIGURE ###
######################################
'''Plot signal (total power) decomposition as a function of depth and show single population LFP'''
def fig_lfp_decomposition(
fig, axes, params, transient=200, X=[
'L23E', 'L6E'], show_xlabels=True):
# ana_params.set_PLOS_2column_fig_style(ratio=0.5)
# fig, axes = plt.subplots(1,5)
# fig.subplots_adjust(left=0.06, right=0.96, wspace=0.4, hspace=0.2)
if analysis_params.bw:
# linestyles = ['-', '-', '--', '--', '-.', '-.', ':', ':']
linestyles = ['-', '-', '-', '-', '-', '-', '-', '-']
markerstyles = ['s', 's', 'v', 'v', 'o', 'o', '^', '^']
else:
if plt.matplotlib.__version__ == '1.5.x':
linestyles = ['-', ':'] * (len(params.Y) // 2)
print(('CSD variance semi log plots may fail with matplotlib.__version__ {}'.format(
plt.matplotlib.__version__)))
else:
linestyles = ['-', (0, (1, 1))] * \
(len(params.Y) // 2) # cercor version
# markerstyles = ['s', 's', 'v', 'v', 'o', 'o', '^', '^']
markerstyles = [None] * len(params.Y)
linewidths = [1.25 for i in range(len(linestyles))]
plt.delaxes(axes[0])
# population plot
axes[0] = fig.add_subplot(261)
axes[0].xaxis.set_ticks([])
axes[0].yaxis.set_ticks([])
axes[0].set_frame_on(False)
plot_population(axes[0], params, aspect='tight', isometricangle=np.pi / 32,
plot_somas=False, plot_morphos=True,
num_unitsE=1, num_unitsI=1,
clip_dendrites=False, main_pops=True,
rasterized=False)
phlp.annotate_subplot(axes[0], ncols=5, nrows=1, letter='A')
axes[0].set_aspect('auto')
axes[0].set_ylim(-1550, 50)
axis = axes[0].axis()
phlp.remove_axis_junk(axes[1])
plot_signal_sum(axes[1],
params,
fname=os.path.join(params.populations_path,
X[0] + '_population_RecExtElectrode.h5'),
unit='mV',
T=[800,
1000],
ylim=[axis[2],
axis[3]],
rasterized=False)
# CSD background colorplot
im = plot_signal_sum_colorplot(axes[1], params, os.path.join(params.populations_path, X[0] + '_population_LaminarCurrentSourceDensity.h5'),
unit=r'$\mu$Amm$^{-3}$', T=[800, 1000],
colorbar=False,
ylim=[axis[2], axis[3]], fancy=False,
cmap=plt.get_cmap(
'gray', 21) if analysis_params.bw else plt.get_cmap(
'bwr_r', 21),
rasterized=False,
scaling_factor=1E6 # unit nA um^-3 -> muA mm-3
)
cb = phlp.colorbar(fig, axes[1], im,
width=0.05, height=0.5,
hoffset=-0.05, voffset=0.5)
cb.set_label('($\\mu$Amm$^{-3}$)', labelpad=0.)
axes[1].set_ylim(-1550, 50)
axes[1].set_title('LFP and CSD ({})'.format(X[0]), va='baseline')
phlp.annotate_subplot(axes[1], ncols=3, nrows=1, letter='B')
# quickfix on first axes
axes[0].set_ylim(-1550, 50)
if show_xlabels:
axes[1].set_xlabel(r'$t$ (ms)', labelpad=0.)
else:
axes[1].set_xlabel('')
phlp.remove_axis_junk(axes[2])
plot_signal_sum(axes[2],
params,
fname=os.path.join(params.populations_path,
X[1] + '_population_RecExtElectrode.h5'),
ylabels=False,
unit='mV',
T=[800,
1000],
ylim=[axis[2],
axis[3]],
rasterized=False)
# CSD background colorplot
im = plot_signal_sum_colorplot(axes[2], params,
os.path.join(
params.populations_path,
X[1] + '_population_LaminarCurrentSourceDensity.h5'),
unit=r'$\mu$Amm$^{-3}$', T=[800, 1000], ylabels=False,
colorbar=False,
ylim=[axis[2], axis[3]], fancy=False,
cmap=plt.get_cmap(
'gray',
21) if analysis_params.bw else plt.get_cmap(
'bwr_r',
21),
rasterized=False,
scaling_factor=1E6 # unit nA um^-3 -> muA mm-3
)
cb = phlp.colorbar(fig, axes[2], im,
width=0.05, height=0.5,
hoffset=-0.05, voffset=0.5)
cb.set_label('($\\mu$Amm$^{-3}$)', labelpad=0.)
axes[2].set_ylim(-1550, 50)
axes[2].set_title('LFP and CSD ({})'.format(X[1]), va='baseline')
phlp.annotate_subplot(axes[2], ncols=1, nrows=1, letter='C')
if show_xlabels:
axes[2].set_xlabel(r'$t$ (ms)', labelpad=0.)
else:
axes[2].set_xlabel('')
plotPowers(axes[3], params, params.Y, 'LaminarCurrentSourceDensity',
linestyles=linestyles, transient=transient,
markerstyles=markerstyles, linewidths=linewidths,
scaling_factor=1E6)
axes[3].axis(axes[3].axis('tight'))
axes[3].set_ylim(-1550, 50)
axes[3].set_yticks(-np.arange(16) * 100)
if show_xlabels:
axes[3].set_xlabel(r'$\sigma^2$ ($(\mu$Amm$^{-3})^2$)', va='center')
axes[3].set_title('CSD variance', va='baseline')
axes[3].set_xlim(left=1E-7)
phlp.remove_axis_junk(axes[3])
phlp.annotate_subplot(axes[3], ncols=1, nrows=1, letter='D')
plotPowers(
axes[4],
params,
params.Y,
'RecExtElectrode',
linestyles=linestyles,
transient=transient,
markerstyles=markerstyles,
linewidths=linewidths)
axes[4].axis(axes[4].axis('tight'))
axes[4].set_ylim(-1550, 50)
axes[4].set_yticks(-np.arange(16) * 100)
if show_xlabels:
axes[4].set_xlabel(r'$\sigma^2$ (mV$^2$)', va='center')
axes[4].set_title('LFP variance', va='baseline')
axes[4].legend(bbox_to_anchor=(1.37, 1.0), frameon=False)
axes[4].set_xlim(left=1E-7)
phlp.remove_axis_junk(axes[4])
phlp.annotate_subplot(axes[4], ncols=1, nrows=1, letter='E')
return fig
if __name__ == '__main__':
plt.close('all')
params = multicompartment_params()
ana_params = analysis_params.params()
ana_params.set_PLOS_2column_fig_style(ratio=1)
fig, axes = plt.subplots(2, 5)
fig.subplots_adjust(
left=0.06,
right=0.96,
wspace=0.4,
hspace=0.2,
bottom=0.05,
top=0.95)
# params.figures_path = os.path.join(params.savefolder, 'figures')
# params.populations_path = os.path.join(params.savefolder, 'populations')
# params.spike_output_path = os.path.join(params.savefolder,
# 'processed_nest_output')
# params.networkSimParams['spike_output_path'] = params.spike_output_path
fig_lfp_decomposition(
fig,
axes[0],
params,
transient=200,
show_xlabels=False)
fig_exc_inh_contrib(fig, axes[1], params,
savefolders=['simulation_output_modified_spontan_exc',
'simulation_output_modified_spontan_inh',
'simulation_output_modified_spontan'],
T=[800, 1000], transient=200, panel_labels='FGHIJ')
fig.savefig('figure_09.pdf',
dpi=300, bbox_inches='tight', pad_inches=0)
fig.savefig('figure_09.eps',
bbox_inches='tight', pad_inches=0.01)
plt.show()
| gpl-3.0 |
xR86/ml-stuff | scripts/utils_keras.py | 1 | 2773 | '''Keras utility library
Utilities for common Keras use. Opinionated file naming.
TODO:
+ use globs to set common naming templates:
+ `m_[model_name]_[graph/weight/plot] - model-specific files (folder structure ?)
+ `results_[plot...]` - general analysis plots
+ `spool_[summaries]` - saved spools (if needed separately from the notebook)
Usual stages:
+ save the model
+ save model summaries
+ save/display model plots
'''
import numpy as np
import matplotlib.pyplot as plt
# Assuming multiple models, you would represent them like this:
#
# models = {
# 'autoencoder': autoencoder,
# 'encoder': encoder,
# 'decoder': decoder
# }
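# A sketch of the typical workflow built from the helpers below (it assumes
# the three models above are compiled keras `Model` instances):
#
# model_save_mult(models)
# print(model_summary_spool_mult(models, save_flag=False))
# model_plot_save_mult(models)
# model_display('encoder', figsize=(8, 8))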
def model_save(model_name, model):
'''Save model graph and weights.
# eg: model_save('encoder', encoder)
'''
# from keras.models import Model
# serialize model to JSON
model_json = model.to_json()
with open('%s.json' % model_name, 'w') as f:
f.write(model_json)
# serialize weights to HDF5
model.save_weights('%s_weights.h5' % model_name)
print('Saved %s to disk' % model_name)
def model_save_mult(models_dict):
'''Save multiple model graphs and weights.
# eg: model_save_mult({'encoder': encoder})
'''
for name, model in models_dict.items():
model_save(name, model)
def model_summary_spool_mult(models_dict, save_flag=True):
'''Saves multiple model summaries to spool and returns it.
# eg: summary_spool_mult({'encoder': encoder}, save_flag=False)
'''
# from keras.models import Model
# import numpy as np
spool = str()
for name, model in models_dict.items():
temp_lst = list()
model.summary(print_fn = lambda x: temp_lst.append(x))
spool += '## %s ##\n' % name.title() # or .upper()
spool += '\n'.join(temp_lst) + '\n'
spool += '***' * 20 + '\n\n'
if save_flag:
np.savetxt('spool_summaries.txt', [spool], fmt='%s')
return spool
# single plot_model use
# from keras.utils import plot_model
# plot_model(model, to_file="model.png")
def model_plot_save_mult(models_dict):
'''Saves multiple models plot
# eg: plot_model_mult({'model_name': model})
'''
# graphviz and pydot needed for plot_model
from keras.utils import plot_model
# single plot_model use
# plot_model(model, to_file="model.png")
for name, model in models_dict.items():
plot_model(model, to_file='%s_plot.png' % name)
def model_display(model_name, figsize=(5,5)):
'''Displays model plot (if it has been saved)
# eg: model_display('encoder')
'''
# import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib.image as mpimg
from pathlib import Path
model_path = '%s_plot.png' % model_name
if Path(model_path).is_file():
# plt.rcParams.update({'figure.figsize': (30,30)})
plt.figure(figsize=figsize)
img = mpimg.imread(model_path)
plt.imshow(img)
plt.axis('off')
plt.show()
if __name__ == '__main__':
pass
| mit |
idlead/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
hainm/statsmodels | statsmodels/regression/mixed_linear_model.py | 19 | 91253 | """
Linear mixed effects models are regression models for dependent data.
They can be used to estimate regression relationships involving both
means and variances.
These models are also known as multilevel linear models, and
hierarchical linear models.
The MixedLM class fits linear mixed effects models to data, and
provides support for some common post-estimation tasks. This is a
group-based implementation that is most efficient for models in which
the data can be partitioned into independent groups. Some models with
crossed effects can be handled by specifying a model with a single
group.
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector (called endog in MixedLM)
* X is a n_i x k_fe dimensional design matrix for the fixed effects
(called exog in MixedLM)
* beta is a k_fe-dimensional vector of fixed effects parameters
(called fe_params in MixedLM)
* Z is a design matrix for the random effects with n_i rows (called
exog_re in MixedLM). The number of columns in Z can vary by group
as discussed below.
* gamma is a random vector with mean 0. The covariance matrix for the
first `k_re` elements of `gamma` (called cov_re in MixedLM) is
common to all groups. The remaining elements of `gamma` are
variance components as discussed in more detail below. Each group
receives its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random so define the probability model.
The marginal mean structure is E[Y | X, Z] = X*beta. If only the mean
structure is of interest, GEE is an alternative to using linear mixed
models.
Two types of random effects are supported. Standard random effects
are correlated with each other in arbitrary ways. Every group has the
same number (`k_re`) of standard random effects, with the same joint
distribution (but with independent realizations across the groups).
Variance components are uncorrelated with each other, and with the
standard random effects. Each variance component has mean zero, and
all realizations of a given variance component have the same variance
parameter. The number of realized variance components per variance
parameter can differ across the groups.
The primary reference for the implementation details is:
MJ Lindstrom, DM Bates (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates 1988, adapted to support variance components.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
* `vcomp` is a vector of variance parameters. The length of `vcomp`
is determined by the number of keys in either the `exog_vc` argument
to ``MixedLM``, or the `vc_formula` argument when using formulas to
fit a model.
Notes:
1. Three different parameterizations are used in different places.
The regression slopes (usually called `fe_params`) are identical in
all three parameterizations, but the variance parameters differ. The
parameterizations are:
* The "user parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "user" cov_re is
equal to the "profile" cov_re1 times the scale.
* The "square root parameterization" in which we work with the Cholesky
factor of cov_re1 instead of cov_re directly. This is hidden from the
user.
All three parameterizations can be packed into a vector by
(optionally) concatenating `fe_params` together with the lower
triangle or Cholesky square root of the dependence structure, followed
by the variance parameters for the variance components. They are
stored as square roots if (and only if) the random effects covariance
matrix is stored as its Cholesky factor. Note that when unpacking, it
is important to either square or reflect the dependence structure
depending on which parameterization is being used.
Two score methods are implemented. One takes the score with respect
to the elements of the random effects covariance matrix (used for
inference once the MLE is reached), and the other takes the score with
respect to the parameters of the Cholesky square root of the random
effects covariance matrix (used for optimization).
The numerical optimization uses GLS to avoid explicitly optimizing
over the fixed effects parameters. The likelihood that is optimized
is profiled over both the scale parameter (a scalar) and the fixed
effects parameters (if any). As a result of this profiling, it is
difficult and unnecessary to calculate the Hessian of the profiled log
likelihood function, so that calculation is not implemented here.
Therefore, optimization methods requiring the Hessian matrix such as
the Newton-Raphson algorithm cannot be used for model fitting.
"""
import numpy as np
import statsmodels.base.model as base
from scipy.optimize import fmin_ncg, fmin_cg, fmin_bfgs, fmin
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools import data as data_tools
from scipy.stats.distributions import norm
from scipy import sparse
import pandas as pd
import patsy
from statsmodels.compat.collections import OrderedDict
from statsmodels.compat import range
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.base._penalties import Penalty
from statsmodels.compat.numpy import np_matrix_rank
from pandas import DataFrame
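# --- Editor's illustrative sketch (not part of statsmodels) -----------------
# The module docstring above describes the marginal covariance of one group,
# cov(Y | X, Z) = scale*I + Z * cov_re * Z'.  The helper below simply builds
# that matrix for hypothetical inputs, to make the notation concrete.
def _example_marginal_cov(exog_re_group, cov_re, scale):
    """Sketch: implied marginal covariance of a single group."""
    n = exog_re_group.shape[0]
    return scale * np.eye(n) + np.dot(exog_re_group,
                                      np.dot(cov_re, exog_re_group.T))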
def _dot(x, y):
"""
Returns the dot product of the arrays, works for sparse and dense.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
return np.dot(x, y)
elif sparse.issparse(x):
return x.dot(y)
elif sparse.issparse(y):
return y.T.dot(x.T).T
# From numpy, adapted to work with sparse and dense arrays.
def _multi_dot_three(A, B, C):
"""
Find best ordering for three arrays and do the multiplication.
Doing it manually instead of using dynamic programming is
approximately 15 times faster.
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return _dot(_dot(A, B), C)
else:
return _dot(A, _dot(B, C))
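# Editor's sketch (not part of statsmodels): both orderings give the same
# product; only the cost differs.  For the hypothetical shapes below,
# cost((AB)C) = 100*2*100 + 100*100*1 = 30000 while cost(A(BC)) = 400, so the
# function multiplies B and C first.
def _example_multi_dot_order():
    A, B, C = np.ones((100, 2)), np.ones((2, 100)), np.ones((100, 1))
    assert np.allclose(_multi_dot_three(A, B, C), np.dot(A, np.dot(B, C)))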
def _dotsum(x, y):
"""
Returns sum(x * y), where '*' is the pointwise product, computed
efficiently for dense and sparse matrices.
"""
if sparse.issparse(x):
return x.multiply(y).sum()
else:
# This way usually avoids allocating a temporary.
return np.dot(x.ravel(), y.ravel())
def _get_exog_re_names(self, exog_re):
"""
Passes through if given a list of names. Otherwise, gets pandas names
or creates some generic variable names as needed.
"""
if self.k_re == 0:
return []
if isinstance(exog_re, pd.DataFrame):
return exog_re.columns.tolist()
elif isinstance(exog_re, pd.Series) and exog_re.name is not None:
return [exog_re.name]
elif isinstance(exog_re, list):
return exog_re
return ["Z{0}".format(k + 1) for k in range(exog_re.shape[1])]
class MixedLMParams(object):
"""
This class represents a parameter state for a mixed linear model.
Parameters
----------
k_fe : integer
The number of covariates with fixed effects.
k_re : integer
The number of covariates with random coefficients (excluding
variance components).
k_vc : integer
The number of variance components parameters.
Notes
-----
This object represents the parameter state for the model in which
the scale parameter has been profiled out.
"""
def __init__(self, k_fe, k_re, k_vc):
self.k_fe = k_fe
self.k_re = k_re
self.k_re2 = k_re * (k_re + 1) // 2
self.k_vc = k_vc
self.k_tot = self.k_fe + self.k_re2 + self.k_vc
self._ix = np.tril_indices(self.k_re)
def from_packed(params, k_fe, k_re, use_sqrt, has_fe):
"""
Create a MixedLMParams object from packed parameter vector.
Parameters
----------
params : array-like
The model parameters packed into a single vector.
k_fe : integer
The number of covariates with fixed effects
k_re : integer
The number of covariates with random effects (excluding
variance components).
use_sqrt : boolean
If True, the random effects covariance matrix is provided
as its Cholesky factor, otherwise the lower triangle of
the covariance matrix is stored.
has_fe : boolean
If True, `params` contains fixed effects parameters.
Otherwise, the fixed effects parameters are set to zero.
Returns
-------
A MixedLMParams object.
"""
k_re2 = int(k_re * (k_re + 1) / 2)
# The number of covariance parameters.
if has_fe:
k_vc = len(params) - k_fe - k_re2
else:
k_vc = len(params) - k_re2
pa = MixedLMParams(k_fe, k_re, k_vc)
cov_re = np.zeros((k_re, k_re))
ix = pa._ix
if has_fe:
pa.fe_params = params[0:k_fe]
cov_re[ix] = params[k_fe:k_fe+k_re2]
else:
pa.fe_params = np.zeros(k_fe)
cov_re[ix] = params[0:k_re2]
if use_sqrt:
cov_re = np.dot(cov_re, cov_re.T)
else:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
pa.cov_re = cov_re
if k_vc > 0:
if use_sqrt:
pa.vcomp = params[-k_vc:]**2
else:
pa.vcomp = params[-k_vc:]
else:
pa.vcomp = np.array([])
return pa
from_packed = staticmethod(from_packed)
def from_components(fe_params=None, cov_re=None, cov_re_sqrt=None, vcomp=None):
"""
Create a MixedLMParams object from each parameter component.
Parameters
----------
fe_params : array-like
The fixed effects parameter (a 1-dimensional array). If
None, there are no fixed effects.
cov_re : array-like
The random effects covariance matrix (a square, symmetric
2-dimensional array).
cov_re_sqrt : array-like
The Cholesky (lower triangular) square root of the random
effects covariance matrix.
vcomp : array-like
The variance component parameters. If None, there are no
variance components.
Returns
-------
A MixedLMParams object.
"""
if vcomp is None:
vcomp = np.empty(0)
if fe_params is None:
fe_params = np.empty(0)
if cov_re is None and cov_re_sqrt is None:
cov_re = np.empty((0, 0))
k_fe = len(fe_params)
k_vc = len(vcomp)
k_re = cov_re.shape[0] if cov_re is not None else cov_re_sqrt.shape[0]
pa = MixedLMParams(k_fe, k_re, k_vc)
pa.fe_params = fe_params
if cov_re_sqrt is not None:
pa.cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T)
elif cov_re is not None:
pa.cov_re = cov_re
pa.vcomp = vcomp
return pa
from_components = staticmethod(from_components)
def copy(self):
"""
Returns a copy of the object.
"""
obj = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
obj.fe_params = self.fe_params.copy()
obj.cov_re = self.cov_re.copy()
obj.vcomp = self.vcomp.copy()
return obj
def get_packed(self, use_sqrt, has_fe=False):
"""
Return the model parameters packed into a single vector.
Parameters
----------
use_sqrt : bool
If True, the Cholesky square root of `cov_re` is
included in the packed result. Otherwise the
lower triangle of `cov_re` is included.
has_fe : bool
If True, the fixed effects parameters are included
in the packed result, otherwise they are omitted.
"""
if self.k_re > 0:
if use_sqrt:
L = np.linalg.cholesky(self.cov_re)
cpa = L[self._ix]
else:
cpa = self.cov_re[self._ix]
else:
cpa = np.zeros(0)
if use_sqrt:
vcomp = np.sqrt(self.vcomp)
else:
vcomp = self.vcomp
if has_fe:
pa = np.concatenate((self.fe_params, cpa, vcomp))
else:
pa = np.concatenate((cpa, vcomp))
return pa
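# Editor's sketch (not part of statsmodels): round-trip a MixedLMParams object
# through get_packed / from_packed.  The numbers are hypothetical.
def _example_params_roundtrip():
    cov_re = np.array([[2.0, 0.5], [0.5, 1.0]])
    pa = MixedLMParams.from_components(fe_params=np.array([1.0, -1.0]),
                                       cov_re=cov_re, vcomp=np.array([0.3]))
    packed = pa.get_packed(use_sqrt=True, has_fe=True)
    pa2 = MixedLMParams.from_packed(packed, k_fe=2, k_re=2,
                                    use_sqrt=True, has_fe=True)
    assert np.allclose(pa2.cov_re, cov_re)
    assert np.allclose(pa2.vcomp, pa.vcomp)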
def _smw_solver(s, A, AtA, BI, di):
"""
Solves the system (s*I + A*B*A') * x = rhs for an arbitrary rhs.
The inverse matrix of B is block diagonal. The upper left block
is BI and the lower right block is a diagonal matrix containing
di.
Parameters
----------
s : scalar
See above for usage
A : ndarray
See above for usage
AtA : square ndarray
A.T * A
BI : square symmetric ndarray
The inverse of `B`.
di : array-like
    The diagonal elements of the lower right block of B^-1.
Returns
-------
A function that takes `rhs` as an input argument and returns a
solution to the linear system defined above.
"""
# Use SMW identity
qmat = AtA / s
m = BI.shape[0]
qmat[0:m, 0:m] += BI
ix = np.arange(m, A.shape[1])
qmat[ix, ix] += di
if sparse.issparse(A):
qi = sparse.linalg.inv(qmat)
qmati = A.dot(qi.T).T
else:
qmati = np.linalg.solve(qmat, A.T)
def solver(rhs):
if sparse.issparse(A):
ql = qmati.dot(rhs)
ql = A.dot(ql)
else:
ql = np.dot(qmati, rhs)
ql = np.dot(A, ql)
rslt = rhs / s - ql / s**2
if sparse.issparse(rslt):
rslt = np.asarray(rslt.todense())
return rslt
return solver
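# Editor's sketch (not part of statsmodels): check _smw_solver against a
# direct dense solve on a small hypothetical system.  Following the
# conventions above, B is block diagonal with upper-left block B_ul and
# lower-right diagonal 1/di, so B^-1 = block_diag(inv(B_ul), diag(di)).
def _example_smw_solver_check():
    rng = np.random.RandomState(0)
    s = 2.0
    A = rng.normal(size=(6, 4))
    B_ul = np.array([[2.0, 0.5], [0.5, 1.0]])
    di = np.array([4.0, 2.5])
    B = np.zeros((4, 4))
    B[0:2, 0:2] = B_ul
    B[2, 2], B[3, 3] = 1 / di[0], 1 / di[1]
    solver = _smw_solver(s, A, np.dot(A.T, A), np.linalg.inv(B_ul), di)
    rhs = rng.normal(size=6)
    direct = np.linalg.solve(s * np.eye(6) + np.dot(A, np.dot(B, A.T)), rhs)
    assert np.allclose(solver(rhs), direct)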
def _smw_logdet(s, A, AtA, BI, di, B_logdet):
"""
Returns the log determinant of s*I + A*B*A'.
Uses the matrix determinant lemma to accelerate the calculation.
Parameters
----------
s : scalar
See above for usage
A : ndarray
See above for usage
AtA : square matrix
A.T * A
BI : square symmetric ndarray
The upper left block of B^-1.
di : array-like
The diagonal elements of the lower right block of B^-1.
B_logdet : real
The log determinant of B
Returns
-------
The log determinant of s*I + A*B*A'.
"""
p = A.shape[0]
ld = p * np.log(s)
qmat = AtA / s
m = BI.shape[0]
qmat[0:m, 0:m] += BI
ix = np.arange(m, A.shape[1])
qmat[ix, ix] += di
if sparse.issparse(qmat):
qmat = qmat.todense()
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
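# Editor's sketch (not part of statsmodels): check _smw_logdet against a
# direct slogdet of the same kind of hypothetical matrix used above.
def _example_smw_logdet_check():
    rng = np.random.RandomState(1)
    s = 2.0
    A = rng.normal(size=(6, 4))
    B_ul = np.array([[2.0, 0.5], [0.5, 1.0]])
    di = np.array([4.0, 2.5])
    B = np.zeros((4, 4))
    B[0:2, 0:2] = B_ul
    B[2, 2], B[3, 3] = 1 / di[0], 1 / di[1]
    _, B_logdet = np.linalg.slogdet(B)
    ld = _smw_logdet(s, A, np.dot(A.T, A), np.linalg.inv(B_ul), di, B_logdet)
    _, direct = np.linalg.slogdet(s * np.eye(6) + np.dot(A, np.dot(B, A.T)))
    assert np.allclose(ld, direct)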
class MixedLM(base.LikelihoodModel):
"""
An object specifying a linear mixed effects model. Use the `fit`
method to fit the model and obtain a results object.
Parameters
----------
endog : 1d array-like
The dependent variable
exog : 2d array-like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array-like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array-like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each group.
exog_vc : dict-like
A dictionary containing specifications of the variance
component terms. See below for details.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : string
The approach to missing data handling
Notes
-----
`exog_vc` is a dictionary of dictionaries. Specifically,
`exog_vc[a][g]` is a matrix whose columns are linearly combined
using independent random coefficients. This random term then
contributes to the variance structure of the data for group `g`.
The random coefficients all have mean zero, and have the same
variance. The matrix must be `m x k`, where `m` is the number of
observations in group `g`. The number of columns may differ among
the top-level groups.
The covariates in `exog`, `exog_re` and `exog_vc` may (but need
not) partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
Examples
--------
A basic mixed model with fixed effects for the columns of
``exog`` and a random intercept for each distinct value of
``group``:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
correlated random coefficients for the columns of ``exog_re``:
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
independent random coefficients for the columns of ``exog_re``:
>>> free = MixedLMParams.from_components(fe_params=np.ones(exog.shape[1]),
cov_re=np.eye(exog_re.shape[1]))
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit(free=free)
A different way to specify independent random coefficients for the
columns of ``exog_re``. In this example ``groups`` must be a
Pandas Series with compatible indexing with ``exog_re``, and
``exog_re`` has two columns.
>>> g = pd.groupby(groups, by=groups).groups
>>> vc = {}
>>> vc['1'] = {k : exog_re.loc[g[k], 0] for k in g}
>>> vc['2'] = {k : exog_re.loc[g[k], 1] for k in g}
>>> model = sm.MixedLM(endog, exog, groups, exog_vc=vc)
>>> result = model.fit()
"""
def __init__(self, endog, exog, groups, exog_re=None,
exog_vc=None, use_sqrt=True, missing='none',
**kwargs):
_allowed_kwargs = ["missing_idx", "design_info", "formula"]
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError("argument %s not permitted for MixedLM initialization" % x)
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
# Needs to run early so that the names are sorted.
self._setup_vcomp(exog_vc)
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
# yes, it should be done up the hierarchy
if (exog is not None and
data_tools._is_using_ndarray_type(exog, None) and
exog.ndim == 1):
exog = exog[:, None]
if (exog_re is not None and
data_tools._is_using_ndarray_type(exog_re, None) and
exog_re.ndim == 1):
exog_re = exog_re[:, None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing,
**kwargs)
self._init_keys.extend(["use_sqrt", "exog_vc"])
self.k_fe = exog.shape[1] # Number of fixed effects parameters
if exog_re is None and exog_vc is None:
# Default random effects structure (random intercepts).
self.k_re = 1
self.k_re2 = 1
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
self.data.param_names = self.exog_names + ['Group RE']
elif exog_re is not None:
# Process exog_re the same way that exog is handled
# upstream
# TODO: this is wrong and should be handled upstream wholly
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
if self.exog_re.ndim == 1:
self.exog_re = self.exog_re[:, None]
# Model dimensions
# Number of random effect covariates
self.k_re = self.exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
else:
# All random effects are variance components
self.k_re = 0
self.k_re2 = 0
if not self.data._param_names:
# HACK: could've been set in from_formula already
# needs refactor
(param_names, exog_re_names,
exog_re_names_full) = self._make_param_names(exog_re)
self.data.param_names = param_names
self.data.exog_re_names = exog_re_names
self.data.exog_re_names_full = exog_re_names_full
self.k_params = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i,g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# Precompute this.
if self.exog_re is None:
self.exog_re2_li = None
else:
self.exog_re2_li = [np.dot(x.T, x) for x in self.exog_re_li]
# The total number of observations, summed over all groups
self.nobs = len(self.endog)
self.n_totobs = self.nobs
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
# Precompute this
self._aex_r = []
self._aex_r2 = []
for i in range(self.n_groups):
a = self._augment_exog(i)
self._aex_r.append(a)
self._aex_r2.append(_dot(a.T, a))
# Precompute this
self._lin, self._quad = self._reparam()
def _setup_vcomp(self, exog_vc):
if exog_vc is None:
exog_vc = {}
self.exog_vc = exog_vc
self.k_vc = len(exog_vc)
vc_names = list(set(exog_vc.keys()))
vc_names.sort()
self._vc_names = vc_names
def _make_param_names(self, exog_re):
"""
Returns the full parameter names list, just the exogenous random
effects variables, and the exogenous random effects variables with
the interaction terms.
"""
exog_names = list(self.exog_names)
exog_re_names = _get_exog_re_names(self, exog_re)
param_names = []
jj = self.k_fe
for i in range(len(exog_re_names)):
for j in range(i + 1):
if i == j:
param_names.append(exog_re_names[i] + " RE")
else:
param_names.append(exog_re_names[j] + " RE x " +
exog_re_names[i] + " RE")
jj += 1
vc_names = [x + " RE" for x in self._vc_names]
return exog_names + param_names + vc_names, exog_re_names, param_names
@classmethod
def from_formula(cls, formula, data, re_formula=None, vc_formula=None,
subset=None, use_sparse=False, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
re_formula : string
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
vc_formula : dict-like
Formulas describing variance components. `vc_formula[vc]` is
the formula for the component with variance parameter named
`vc`. The formula is processed into a matrix, and the columns
of this matrix are linearly combined with independent random
coefficients having mean zero and a common variance.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms (e.g., a numpy structured or rec array, a dictionary, or
a pandas DataFrame). `args` and `kwargs` are passed on to the
model instantiation.
If the variance component is intended to produce random
intercepts for disjoint subsets of a group, specified by
string labels or a categorical data value, always use '0 +' in
the formula so that no overall intercept is included.
If the variance components specify random slopes and you do
not also want a random group-level intercept in the model,
then use '0 +' in the formula to exclude the intercept.
The variance components formulas are processed separately for
each group. If a variable is categorical the results will not
be affected by whether the group labels are distinct or
re-used over the top-level groups.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
Examples
--------
Suppose we have an educational data set with students nested
in classrooms nested in schools. The students take a test,
and we want to relate the test scores to the students' ages,
while accounting for the effects of classrooms and schools.
The school will be the top-level group, and the classroom is a
nested group that is specified as a variance component. Note
that the schools may have different numbers of classrooms, and
the classroom labels may (but need not) be different across
the schools.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age', vc_formula=vc,
re_formula='1', groups='school', data=data)
Now suppose we also have a previous test score called
'pretest'. If we want the relationship between pretest
scores and the current test to vary by classroom, we can
specify a random slope for the pretest score
>>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc,
re_formula='1', groups='school', data=data)
The following model is almost equivalent to the previous one,
but here the classroom random intercept and pretest slope may
be correlated.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc,
re_formula='1 + pretest', groups='school',
data=data)
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument in MixedLM.from_formula")
# If `groups` is a variable name, retrieve the data for the
# groups variable.
group_name = "Group"
if type(kwargs["groups"]) == str:
group_name = kwargs["groups"]
kwargs["groups"] = np.asarray(data[kwargs["groups"]])
if re_formula is not None:
if re_formula.strip() == "1":
# Work around Patsy bug, fixed by 0.3.
exog_re = np.ones((data.shape[0], 1))
exog_re_names = ["Group"]
else:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)
exog_re_names = exog_re.design_info.column_names
exog_re = np.asarray(exog_re)
if exog_re.ndim == 1:
exog_re = exog_re[:, None]
else:
exog_re = None
if vc_formula is None:
exog_re_names = ["groups"]
else:
exog_re_names = []
if vc_formula is not None:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_vc = {}
data["_group"] = kwargs["groups"]
gb = data.groupby("_group")
kylist = list(gb.groups.keys())
kylist.sort()
for vc_name in vc_formula.keys():
exog_vc[vc_name] = {}
for group_ix, group in enumerate(kylist):
ii = gb.groups[group]
vcg = vc_formula[vc_name]
mat = patsy.dmatrix(vcg, data.loc[ii, :], eval_env=eval_env,
return_type='dataframe')
if use_sparse:
exog_vc[vc_name][group] = sparse.csr_matrix(mat)
else:
exog_vc[vc_name][group] = np.asarray(mat)
exog_vc = exog_vc
else:
exog_vc = None
mod = super(MixedLM, cls).from_formula(formula, data,
subset=None,
exog_re=exog_re,
exog_vc=exog_vc,
*args, **kwargs)
# expand re names to account for pairs of RE
(param_names,
exog_re_names,
exog_re_names_full) = mod._make_param_names(exog_re_names)
mod.data.param_names = param_names
mod.data.exog_re_names = exog_re_names
mod.data.exog_re_names_full = exog_re_names_full
mod.data.vcomp_names = mod._vc_names
return mod
def predict(self, params, exog=None):
"""
Return predicted values from a design matrix.
Parameters
----------
params : array-like
Parameters of a mixed linear model. Can be either a
MixedLMParams instance, or a vector containing the packed
model parameters in which the fixed effects parameters are
at the beginning of the vector, or a vector containing
only the fixed effects parameters.
exog : array-like, optional
Design / exogenous data for the fixed effects. Model exog
is used if None.
Returns
-------
An array of fitted values. Note that these predicted values
only reflect the fixed effects mean structure of the model.
"""
if exog is None:
exog = self.exog
if isinstance(params, MixedLMParams):
params = params.fe_params
else:
params = params[0:self.k_fe]
return np.dot(exog, params)
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array is None:
return None
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters
----------
method : string or Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array-like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
in magnitude are treated as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : integer
The maximum number of iterations.
fit_kwargs : keywords
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance containing the results.
Notes
-----
The covariance structure is not updated as the fixed effects
parameters are varied.
The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References
----------
Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
"""
if type(method) == str and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
# Scale the penalty weights by alpha
method.alpha = alpha
fit_kwargs.update({"fe_pen": method})
return self.fit(**fit_kwargs)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_kwargs)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
vcomp = mdf.vcomp
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
a, b = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
resid = resid_all[self.row_indices[group]]
solver = _smw_solver(scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
x = exog[:, j]
u = solver(x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled, mdf.vcomp)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess = self.hessian(params_prof)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)
params_object = MixedLMParams.from_components(fe_params, cov_re=cov_re)
results = MixedLMResults(self, params_prof, pcov / scale)
results.params_object = params_object
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
return MixedLMResultsWrapper(results)
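# Editor's sketch (not part of statsmodels): the coordinate descent loop in
# fit_regularized repeatedly minimizes a scalar objective of the form
# a*x**2 + b*x + w*abs(x) with a > 0; its closed-form (soft-threshold)
# solution is reproduced below for reference.
def _example_l1_scalar_update(a, b, w):
    if b > w:
        return -(b - w) / (2 * a)
    elif b < -w:
        return -(b + w) / (2 * a)
    return 0.0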
def get_fe_params(self, cov_re, vcomp):
"""
Use GLS to update the fixed effects parameter estimates.
Parameters
----------
cov_re : array-like
The covariance matrix of the random effects.
Returns
-------
The GLS estimates of the fixed effects parameters.
"""
if self.k_fe == 0:
return np.array([])
if self.k_re == 0:
cov_re_inv = np.empty((0,0))
else:
cov_re_inv = np.linalg.inv(cov_re)
# Cache these quantities that don't change.
if not hasattr(self, "_endex_li"):
self._endex_li = []
for group_ix, _ in enumerate(self.group_labels):
mat = np.concatenate((self.exog_li[group_ix], self.endog_li[group_ix][:, None]), axis=1)
self._endex_li.append(mat)
xtxy = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
u = solver(self._endex_li[group_ix])
xtxy += np.dot(exog.T, u)
fe_params = np.linalg.solve(xtxy[:, 0:-1], xtxy[:, -1])
return fe_params
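# Editor's sketch (not part of statsmodels): get_fe_params is ordinary GLS.
# With a known marginal covariance V_i per group, the estimate is
# beta = (sum_i X_i' V_i^-1 X_i)^-1 (sum_i X_i' V_i^-1 y_i); the accumulated
# `xtxy` block above is [X'V^-1 X | X'V^-1 y].  The inputs below are
# hypothetical per-group arrays.
def _example_gls(exog_list, endog_list, vcov_list):
    xtx, xty = 0.0, 0.0
    for x, y, v in zip(exog_list, endog_list, vcov_list):
        vix = np.linalg.solve(v, x)
        xtx += np.dot(x.T, vix)
        xty += np.dot(vix.T, y)
    return np.linalg.solve(xtx, xty)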
def _reparam(self):
"""
Returns parameters of the map converting parameters from the
form used in optimization to the form returned to the user.
Returns
-------
lin : list-like
Linear terms of the map
quad : list-like
Quadratic terms of the map
Notes
-----
If P are the standard form parameters and R are the
transformed parameters (i.e. with the Cholesky square root
covariance and square root transformed variance components),
then P[i] = lin[i] * R + R' * quad[i] * R
"""
k_fe, k_re, k_re2, k_vc = self.k_fe, self.k_re, self.k_re2, self.k_vc
k_tot = k_fe + k_re2 + k_vc
ix = np.tril_indices(self.k_re)
lin = []
for k in range(k_fe):
e = np.zeros(k_tot)
e[k] = 1
lin.append(e)
for k in range(k_re2):
lin.append(np.zeros(k_tot))
for k in range(k_vc):
lin.append(np.zeros(k_tot))
quad = []
# Quadratic terms for fixed effects.
for k in range(k_tot):
quad.append(np.zeros((k_tot, k_tot)))
# Quadratic terms for random effects covariance.
ii = np.tril_indices(k_re)
ix = [(a,b) for a,b in zip(ii[0], ii[1])]
for i1 in range(k_re2):
for i2 in range(k_re2):
ix1 = ix[i1]
ix2 = ix[i2]
if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
ii = (ix2[0], ix1[0])
k = ix.index(ii)
quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
for k in range(k_tot):
quad[k] = 0.5*(quad[k] + quad[k].T)
# Quadratic terms for variance components.
km = k_fe + k_re2
for k in range(km, km+k_vc):
quad[k][k, k] = 1
return lin, quad
def _expand_vcomp(self, vcomp, group):
"""
Replicate variance parameters to match a group's design.
Parameters
----------
vcomp : array-like
The variance parameters for the variance components.
group : string
The group label
Returns an expanded version of vcomp, in which each variance
parameter is copied as many times as there are independent
realizations of the variance component in the given group.
"""
if len(vcomp) == 0:
return np.empty(0)
vc_var = []
for j, k in enumerate(self._vc_names):
if group in self.exog_vc[k]:
vc_var.append(vcomp[j] * np.ones(self.exog_vc[k][group].shape[1]))
if len(vc_var) > 0:
return np.concatenate(vc_var)
else:
# No variance components apply to this group; fall through to the empty return below.
return np.empty(0)
def _augment_exog(self, group_ix):
"""
Concatenate the columns for variance components to the columns
for other random effects to obtain a single random effects
exog matrix for a given group.
"""
ex_r = self.exog_re_li[group_ix] if self.k_re > 0 else None
if self.k_vc == 0:
return ex_r
group = self.group_labels[group_ix]
ex = [ex_r] if self.k_re > 0 else []
any_sparse = False
for j,k in enumerate(self._vc_names):
if group not in self.exog_vc[k]:
continue
ex.append(self.exog_vc[k][group])
any_sparse |= sparse.issparse(ex[-1])
if any_sparse:
for j, x in enumerate(ex):
if not sparse.issparse(x):
ex[j] = sparse.csr_matrix(x)
ex = sparse.hstack(ex)
ex = sparse.csr_matrix(ex)
else:
ex = np.concatenate(ex, axis=1)
return ex
def loglike(self, params, profile_fe=True):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model.
Parameters
----------
params : MixedLMParams, or array-like.
The parameter value. If array-like, must be a packed
parameter vector containing only the covariance
parameters.
profile_fe : boolean
If True, replace the provided value of `fe_params` with
the GLS estimates.
Returns
-------
The log-likelihood value at `params`.
Notes
-----
The scale parameter `scale` is always profiled out of the
log-likelihood. In addition, if `profile_fe` is true the
fixed effects parameters are also profiled out.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
cov_re = params.cov_re
vcomp = params.vcomp
# Move to the profile set
if profile_fe:
fe_params = self.get_fe_params(cov_re, vcomp)
else:
fe_params = params.fe_params
if self.k_re > 0:
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
_, cov_re_logdet = np.linalg.slogdet(cov_re)
else:
cov_re_inv = np.zeros((0, 0))
cov_re_logdet = 0
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if (self.cov_pen is not None) and (self.k_re > 0):
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if (self.fe_pen is not None):
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for k, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
cov_aug_logdet = cov_re_logdet + np.sum(np.log(vc_var))
exog = self.exog_li[k]
ex_r, ex2_r = self._aex_r[k], self._aex_r2[k]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
resid = resid_all[self.row_indices[group]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var, cov_aug_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = solver(resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = solver(exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_,ld = np.linalg.slogdet(xvx)
likeval -= ld / 2.
likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
likeval += ((self.n_totobs - self.k_fe) *
np.log(self.n_totobs - self.k_fe) / 2.)
likeval -= (self.n_totobs - self.k_fe) / 2.
else:
likeval -= self.n_totobs * np.log(qf) / 2.
likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
likeval += self.n_totobs * np.log(self.n_totobs) / 2.
likeval -= self.n_totobs / 2.
return likeval
def _gen_dV_dPar(self, ex_r, solver, group, max_ix=None):
"""
A generator that yields the element-wise derivative of the
marginal covariance matrix with respect to the random effects
variance and covariance parameters.
ex_r : array-like
The random effects design matrix
solver : function
A function that given x returns V^{-1}x, where V
is the group's marginal covariance matrix.
group : scalar
The group label
max_ix : integer or None
If not None, the generator ends when this index
is reached.
"""
axr = solver(ex_r)
# Regular random effects
jj = 0
for j1 in range(self.k_re):
for j2 in range(j1 + 1):
if max_ix is not None and jj > max_ix:
return
mat_l, mat_r = ex_r[:,j1:j1+1], ex_r[:,j2:j2+1] # Need 2d
vsl, vsr = axr[:,j1:j1+1], axr[:,j2:j2+1]
yield jj, mat_l, mat_r, vsl, vsr, j1 == j2
jj += 1
# Variance components
for ky in self._vc_names:
if group in self.exog_vc[ky]:
if max_ix is not None and jj > max_ix:
return
mat = self.exog_vc[ky][group]
axmat = solver(mat)
yield jj, mat, mat, axmat, axmat, True
jj += 1
def score(self, params, profile_fe=True):
"""
Returns the score vector of the profile log-likelihood.
Notes
-----
The score vector that is returned is computed with respect to
the parameterization defined by this model instance's
`use_sqrt` attribute.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
if profile_fe:
params.fe_params = self.get_fe_params(params.cov_re, params.vcomp)
if self.use_sqrt:
score_fe, score_re, score_vc = self.score_sqrt(params, calc_fe=not profile_fe)
else:
score_fe, score_re, score_vc = self.score_full(params, calc_fe=not profile_fe)
if self._freepat is not None:
score_fe *= self._freepat.fe_params
score_re *= self._freepat.cov_re[self._freepat._ix]
score_vc *= self._freepat.vcomp
if profile_fe:
return np.concatenate((score_re, score_vc))
else:
return np.concatenate((score_fe, score_re, score_vc))
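# Editor's sketch (not part of statsmodels): the analytic score can be checked
# against a finite-difference gradient of `loglike`.  `model` is assumed to be
# a MixedLM instance and `packed` a packed covariance-parameter vector
# (without fixed effects) compatible with it; both are hypothetical here.
def _example_score_check(model, packed, eps=1e-6):
    grad = model.score(packed, profile_fe=True)
    numgrad = np.zeros_like(packed)
    for j in range(len(packed)):
        step = np.zeros_like(packed)
        step[j] = eps
        numgrad[j] = (model.loglike(packed + step, profile_fe=True) -
                      model.loglike(packed - step, profile_fe=True)) / (2 * eps)
    return np.max(np.abs(grad - numgrad))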
def score_full(self, params, calc_fe):
"""
Returns the score with respect to untransformed parameters.
Calculates the score vector for the profiled log-likelihood of
the mixed effects model with respect to the parameterization
in which the random effects covariance matrix is represented
in its full form (not using the Cholesky factor).
Parameters
----------
params : MixedLMParams or array-like
The parameter at which the score function is evaluated.
If array-like, must contain the packed random effects
parameters (cov_re and vcomp) without fe_params.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array-like
The score vector with respect to the fixed effects
parameters.
score_re : array-like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array-like
The score vector with respect to variance components
parameters.
Notes
-----
`score_re` is taken with respect to the parameterization in
which `cov_re` is represented through its lower triangle
(without taking the Cholesky square root).
"""
fe_params = params.fe_params
cov_re = params.cov_re
vcomp = params.vcomp
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
score_fe = np.zeros(self.k_fe)
score_re = np.zeros(self.k_re2)
score_vc = np.zeros(self.k_vc)
# Handle the covariance penalty.
if self.cov_pen is not None:
score_re -= self.cov_pen.grad(cov_re, cov_re_inv)
# Handle the fixed effects penalty.
if calc_fe and (self.fe_pen is not None):
score_fe -= self.fe_pen.grad(fe_params)
# resid' V^{-1} resid, summed over the groups (a scalar)
rvir = 0.
# exog' V^{-1} resid, summed over the groups (a k_fe
# dimensional vector)
xtvir = 0.
# exog' V^{_1} exog, summed over the groups (a k_fe x k_fe
# matrix)
xtvix = 0.
# V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th
# covariance parameter.
xtax = [0.,] * (self.k_re2 + self.k_vc)
# Temporary related to the gradient of log |V|
dlv = np.zeros(self.k_re2 + self.k_vc)
# resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
rvavr = np.zeros(self.k_re2 + self.k_vc)
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
if self.reml:
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
# Contributions to the covariance parameter gradient
vir = solver(resid)
for jj, matl, matr, vsl, vsr, sym in self._gen_dV_dPar(ex_r, solver, group):
dlv[jj] = _dotsum(matr, vsl)
if not sym:
dlv[jj] += _dotsum(matl, vsr)
ul = _dot(vir, matl)
ur = ul.T if sym else _dot(matr.T, vir)
ulr = np.dot(ul, ur)
rvavr[jj] += ulr
if not sym:
rvavr[jj] += ulr.T
if self.reml:
ul = _dot(viexog.T, matl)
ur = ul.T if sym else _dot(matr.T, viexog)
ulr = np.dot(ul, ur)
xtax[jj] += ulr
if not sym:
xtax[jj] += ulr.T
# Contribution of log|V| to the covariance parameter
# gradient.
if self.k_re > 0:
score_re -= 0.5 * dlv[0:self.k_re2]
if self.k_vc > 0:
score_vc -= 0.5 * dlv[self.k_re2:]
rvir += np.dot(resid, vir)
if calc_fe:
xtvir += np.dot(exog.T, vir)
fac = self.n_totobs
if self.reml:
fac -= self.k_fe
if calc_fe and self.k_fe > 0:
score_fe += fac * xtvir / rvir
if self.k_re > 0:
score_re += 0.5 * fac * rvavr[0:self.k_re2] / rvir
if self.k_vc > 0:
score_vc += 0.5 * fac * rvavr[self.k_re2:] / rvir
if self.reml:
xtvixi = np.linalg.inv(xtvix)
for j in range(self.k_re2):
score_re[j] += 0.5 * _dotsum(xtvixi.T, xtax[j])
for j in range(self.k_vc):
score_vc[j] += 0.5 * _dotsum(xtvixi.T, xtax[self.k_re2 + j])
return score_fe, score_re, score_vc
def score_sqrt(self, params, calc_fe=True):
"""
Returns the score with respect to transformed parameters.
Calculates the score vector with respect to the
parameterization in which the random effects covariance matrix
is represented through its Cholesky square root.
Parameters
----------
params : MixedLMParams or array-like
The model parameters. If array-like must contain packed
parameters that are compatible with this model instance.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array-like
The score vector with respect to the fixed effects
parameters.
score_re : array-like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array-like
The score vector with respect to variance components
parameters.
"""
score_fe, score_re, score_vc = self.score_full(params, calc_fe=calc_fe)
params_vec = params.get_packed(use_sqrt=True, has_fe=True)
score_full = np.concatenate((score_fe, score_re, score_vc))
scr = 0.
for i in range(len(params_vec)):
v = self._lin[i] + 2 * np.dot(self._quad[i], params_vec)
scr += score_full[i] * v
score_fe = scr[0:self.k_fe]
score_re = scr[self.k_fe:self.k_fe + self.k_re2]
score_vc = scr[self.k_fe + self.k_re2:]
return score_fe, score_re, score_vc
def hessian(self, params):
"""
Returns the model's Hessian matrix.
Calculates the Hessian matrix for the linear mixed effects
model with respect to the parameterization in which the
covariance matrix is represented directly (without square-root
transformation).
Parameters
----------
params : MixedLMParams or array-like
The model parameters at which the Hessian is calculated.
If array-like, must contain the packed parameters in a
form that is compatible with this model instance.
Returns
-------
hess : 2d ndarray
The Hessian matrix, evaluated at `params`.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt,
has_fe=True)
fe_params = params.fe_params
vcomp = params.vcomp
cov_re = params.cov_re
if self.k_re > 0:
cov_re_inv = np.linalg.inv(cov_re)
else:
cov_re_inv = np.empty((0, 0))
# Blocks for the fixed and random effects parameters.
hess_fe = 0.
hess_re = np.zeros((self.k_re2 + self.k_vc, self.k_re2 + self.k_vc))
hess_fere = np.zeros((self.k_re2 + self.k_vc, self.k_fe))
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
rvir = 0.
xtvix = 0.
xtax = [0.,] * (self.k_re2 + self.k_vc)
m = self.k_re2 + self.k_vc
B = np.zeros(m)
D = np.zeros((m, m))
F = [[0.] * m for k in range(m)]
for k, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[k]
ex_r, ex2_r = self._aex_r[k], self._aex_r2[k]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[k]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
vir = solver(resid)
rvir += np.dot(resid, vir)
for jj1, matl1, matr1, vsl1, vsr1, sym1 in self._gen_dV_dPar(ex_r, solver, group):
ul = _dot(viexog.T, matl1)
ur = _dot(matr1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if not sym1:
ul = _dot(viexog.T, matr1)
ur = _dot(matl1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if self.reml:
ul = _dot(viexog.T, matl1)
ur = ul if sym1 else np.dot(viexog.T, matr1)
ulr = _dot(ul, ur.T)
xtax[jj1] += ulr
if not sym1:
xtax[jj1] += ulr.T
ul = _dot(vir, matl1)
ur = ul if sym1 else _dot(vir, matr1)
B[jj1] += np.dot(ul, ur) * (1 if sym1 else 2)
# V^{-1} * dV/d_theta
E = [(vsl1, matr1)]
if not sym1:
E.append((vsr1, matl1))
for jj2, matl2, matr2, vsl2, vsr2, sym2 in self._gen_dV_dPar(ex_r, solver, group, jj1):
re = sum([_multi_dot_three(matr2.T, x[0], x[1].T) for x in E])
vt = 2 * _dot(_multi_dot_three(vir[None, :], matl2, re), vir[:, None])
if not sym2:
le = sum([_multi_dot_three(matl2.T, x[0], x[1].T) for x in E])
vt += 2 * _dot(_multi_dot_three(vir[None, :], matr2, le), vir[:, None])
D[jj1, jj2] += vt
if jj1 != jj2:
D[jj2, jj1] += vt
rt = _dotsum(vsl2, re.T) / 2
if not sym2:
rt += _dotsum(vsr2, le.T) / 2
hess_re[jj1, jj2] += rt
if jj1 != jj2:
hess_re[jj2, jj1] += rt
if self.reml:
ev = sum([_dot(x[0], _dot(x[1].T, viexog)) for x in E])
u1 = _dot(viexog.T, matl2)
u2 = _dot(matr2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
if not sym2:
u1 = np.dot(viexog.T, matr2)
u2 = np.dot(matl2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
hess_fe -= fac * xtvix / rvir
hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)
hess_fere = -fac * hess_fere / rvir
if self.reml:
QL = [np.linalg.solve(xtvix, x) for x in xtax]
for j1 in range(self.k_re2 + self.k_vc):
for j2 in range(j1 + 1):
a = _dotsum(QL[j1].T, QL[j2])
a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
a *= 0.5
hess_re[j1, j2] += a
if j1 > j2:
hess_re[j2, j1] += a
# Put the blocks together to get the Hessian.
m = self.k_fe + self.k_re2 + self.k_vc
hess = np.zeros((m, m))
hess[0:self.k_fe, 0:self.k_fe] = hess_fe
hess[0:self.k_fe, self.k_fe:] = hess_fere.T
hess[self.k_fe:, 0:self.k_fe] = hess_fere
hess[self.k_fe:, self.k_fe:] = hess_re
return hess
def get_scale(self, fe_params, cov_re, vcomp):
"""
Returns the estimated error variance based on given estimates
of the slopes and random effects covariance matrix.
Parameters
----------
fe_params : array-like
The regression slope estimates
cov_re : 2d array-like
Estimate of the random effects covariance matrix
vcomp : array-like
Estimate of the variance components
Returns
-------
scale : float
The estimated error variance.
"""
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
qf = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
mat = solver(resid)
qf += np.dot(resid, mat)
if self.reml:
qf /= (self.n_totobs - self.k_fe)
else:
qf /= self.n_totobs
return qf
def fit(self, start_params=None, reml=True, niter_sa=0,
do_cg=True, fe_pen=None, cov_pen=None, free=None,
full_output=False, method='bfgs', **kwargs):
"""
Fit a linear mixed model to the data.
Parameters
----------
start_params: array-like or MixedLMParams
Starting values for the profile log-likelihood. If not a
`MixedLMParams` instance, this should be an array
containing the packed parameters for the profile
log-likelihood, including the fixed effects
parameters.
reml : bool
If true, fit according to the REML likelihood, else
fit the standard likelihood using ML.
cov_pen : CovariancePenalty object
A penalty for the random effects covariance matrix
fe_pen : Penalty object
A penalty on the fixed effects
free : MixedLMParams object
If not `None`, this is a mask that allows parameters to be
held fixed at specified values. A 1 indicates that the
corresponding parameter is estimated, a 0 indicates that
it is fixed at its starting value. Setting the `cov_re`
component to the identity matrix fits a model with
independent random effects. Note that some optimization
methods do not respect this constraint (bfgs and lbfgs both
work).
full_output : bool
If true, attach iteration history to results
method : string
Optimization method.
Returns
-------
A MixedLMResults instance.
"""
_allowed_kwargs = ['gtol', 'maxiter']
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError("Argument %s not allowed for MixedLM.fit" % x)
if method.lower() in ["newton", "ncg"]:
raise ValueError("method %s not available for MixedLM" % method)
self.reml = reml
self.cov_pen = cov_pen
self.fe_pen = fe_pen
self._freepat = free
if full_output:
hist = []
else:
hist = None
success = False
if start_params is None:
params = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
params.fe_params = np.zeros(self.k_fe)
params.cov_re = np.eye(self.k_re)
params.vcomp = np.ones(self.k_vc)
else:
if isinstance(start_params, MixedLMParams):
params = start_params
else:
# It's a packed array
if len(start_params) == self.k_fe + self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(start_params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=True)
elif len(start_params) == self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(start_params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
else:
raise ValueError("invalid start_params")
if do_cg:
kwargs["retall"] = hist is not None
if "disp" not in kwargs:
kwargs["disp"] = False
packed = params.get_packed(use_sqrt=self.use_sqrt, has_fe=False)
# It seems that the optimizers sometimes stop too soon, so
# we run a few times.
for rep in range(5):
rslt = super(MixedLM, self).fit(start_params=packed,
skip_hessian=True,
method=method,
**kwargs)
if rslt.mle_retvals['converged']:
break
packed = rslt.params
# The optimization succeeded
params = np.atleast_1d(rslt.params)
if hist is not None:
hist.append(rslt.mle_retvals)
converged = rslt.mle_retvals['converged']
if not converged:
msg = "Gradient optimization failed."
warnings.warn(msg, ConvergenceWarning)
# Convert to the final parameterization (i.e. undo the square
# root transform of the covariance matrix, and the profiling
# over the error variance).
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt, has_fe=False)
cov_re_unscaled = params.cov_re
vcomp_unscaled = params.vcomp
fe_params = self.get_fe_params(cov_re_unscaled, vcomp_unscaled)
params.fe_params = fe_params
scale = self.get_scale(fe_params, cov_re_unscaled, vcomp_unscaled)
cov_re = scale * cov_re_unscaled
vcomp = scale * vcomp_unscaled
if (((self.k_re > 0) and (np.min(np.abs(np.diag(cov_re))) < 0.01)) or
((self.k_vc > 0) and (np.min(np.abs(vcomp)) < 0.01))):
msg = "The MLE may be on the boundary of the parameter space."
warnings.warn(msg, ConvergenceWarning)
# Compute the Hessian at the MLE. Note that this is the
# Hessian with respect to the random effects covariance matrix
# (not its square root). It is used for obtaining standard
# errors, not for optimization.
hess = self.hessian(params)
hess_diag = np.diag(hess)
if free is not None:
pcov = np.zeros_like(hess)
pat = self._freepat.get_packed(use_sqrt=False, has_fe=True)
ii = np.flatnonzero(pat)
hess_diag = hess_diag[ii]
if len(ii) > 0:
hess1 = hess[np.ix_(ii, ii)]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
else:
pcov = np.linalg.inv(-hess)
if np.any(hess_diag >= 0):
msg = "The Hessian matrix at the estimated parameter values is not positive definite."
warnings.warn(msg, ConvergenceWarning)
# Prepare a results class instance
params_packed = params.get_packed(use_sqrt=False, has_fe=True)
results = MixedLMResults(self, params_packed, pcov / scale)
results.params_object = params
results.fe_params = fe_params
results.cov_re = cov_re
results.vcomp = vcomp
results.scale = scale
results.cov_re_unscaled = cov_re_unscaled
results.method = "REML" if self.reml else "ML"
results.converged = converged
results.hist = hist
results.reml = self.reml
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
results.use_sqrt = self.use_sqrt
results.freepat = self._freepat
return MixedLMResultsWrapper(results)
class MixedLMResults(base.LikelihoodModelResults, base.ResultMixin):
'''
Class to contain results of fitting a linear mixed effects model.
MixedLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Returns
-------
**Attributes**
model : class instance
Pointer to the MixedLM model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
fe_params : array
The fitted fixed-effects coefficients
re_params : array
The fitted random-effects covariance matrix
bse_fe : array
The standard errors of the fitted fixed effects coefficients
bse_re : array
The standard errors of the fitted random effects covariance
matrix
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params):
super(MixedLMResults, self).__init__(model, params,
normalized_cov_params=cov_params)
self.nobs = self.model.nobs
self.df_resid = self.nobs - np_matrix_rank(self.model.exog)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values for the model.
The fitted values reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
fit = np.dot(self.model.exog, self.fe_params)
re = self.random_effects
for group_ix, group in enumerate(self.model.group_labels):
ix = self.model.row_indices[group]
mat = [self.model.exog_re_li[group_ix]]
for c in self.model._vc_names:
if group in self.model.exog_vc[c]:
mat.append(self.model.exog_vc[c][group])
mat = np.concatenate(mat, axis=1)
fit[ix] += np.dot(mat, re[group])
return fit
@cache_readonly
def resid(self):
"""
Returns the residuals for the model.
The residuals reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p])
@cache_readonly
def bse_re(self):
"""
Returns the standard errors of the variance parameters. Note
that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:])
def _expand_re_names(self, group):
names = list(self.model.data.exog_re_names)
for v in self.model._vc_names:
if group in self.model.exog_vc[v]:
ix = range(self.model.exog_vc[v][group].shape[1])
na = ["%s[%d]" % (v, j + 1) for j in ix]
names.extend(na)
return names
@cache_readonly
def random_effects(self):
"""
The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
means of the random effects for the group.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
raise ValueError("Cannot predict random effects from singular covariance structure.")
vcomp = self.vcomp
k_re = self.k_re
ranef_dict = {}
for group_ix, group in enumerate(self.model.group_labels):
endog = self.model.endog_li[group_ix]
exog = self.model.exog_li[group_ix]
ex_r, ex2_r = self.model._aex_r[group_ix], self.model._aex_r2[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group)
# Get the residuals relative to fixed effects
resid = endog
if self.k_fe > 0:
expval = np.dot(exog, self.fe_params)
resid = resid - expval
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
vir = solver(resid)
xtvir = _dot(ex_r.T, vir)
xtvir[0:k_re] = np.dot(self.cov_re, xtvir[0:k_re])
xtvir[k_re:] *= vc_var
ranef_dict[group] = pd.Series(xtvir, index=self._expand_re_names(group))
return ranef_dict
@cache_readonly
def random_effects_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
vcomp = self.vcomp
ranef_dict = {}
for group_ix in range(self.model.n_groups):
ex_r, ex2_r = self.model._aex_r[group_ix], self.model._aex_r2[group_ix]
label = self.model.group_labels[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv, 1 / vc_var)
n = ex_r.shape[0]
m = self.cov_re.shape[0]
mat1 = np.empty((n, m))
mat1[:, 0:m] = np.dot(ex_r[:, 0:m], self.cov_re)
mat1[:, m:] = np.dot(ex_r[:, m:], np.diag(vc_var))
mat2 = solver(mat1)
mat2 = np.dot(mat1.T, mat2)
v = -mat2
v[0:m, 0:m] += self.cov_re
ix = np.arange(m, v.shape[0])
v[ix, ix] += vc_var
na = self._expand_re_names(group_ix)
v = pd.DataFrame(v, index=na, columns=na)
ranef_dict[label] = v
return ranef_dict
# Need to override since t-tests are only used for fixed effects parameters.
def t_test(self, r_matrix, scale=None, use_t=None):
"""
        Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array-like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
"""
if r_matrix.shape[1] != self.k_fe:
raise ValueError("r_matrix for t-test should have %d columns" % self.k_fe)
d = self.k_re2 + self.k_vc
z0 = np.zeros((r_matrix.shape[0], d))
r_matrix = np.concatenate((r_matrix, z0), axis=1)
tst_rslt = super(MixedLMResults, self).t_test(r_matrix, scale=scale, use_t=use_t)
return tst_rslt
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""
Summarize the mixed model regression results.
Parameters
        ----------
yname : string, optional
Default is `y`
xname_fe : list of strings, optional
Fixed effects covariate names
xname_re : list of strings, optional
Random effects covariate names
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = OrderedDict()
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Likelihood:"] = self.llf
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
sdf = np.nan * np.ones((self.k_fe + self.k_re2 + self.k_vc, 6))
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] = np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# All random effects variances and covariances
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
# Variance components
for i in range(self.k_vc):
sdf[jj, 0] = self.vcomp[i]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=self.model.data.param_names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry
@cache_readonly
def llf(self):
return self.model.loglike(self.params_object, profile_fe=False)
@cache_readonly
def aic(self):
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * (self.llf - df)
@cache_readonly
def bic(self):
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * self.llf + np.log(self.nobs) * df
def profile_re(self, re_ix, vtype, num_low=5, dist_low=1., num_high=5,
dist_high=1.):
"""
Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : integer
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : string
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : integer
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : integer
The number of points at which to calculate the likelihood
            above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled.
"""
pmodel = self.model
k_fe = pmodel.k_fe
k_re = pmodel.k_re
k_vc = pmodel.k_vc
endog, exog, groups = pmodel.endog, pmodel.exog, pmodel.groups
# Need to permute the columns of the random effects design
# matrix so that the profiled variable is in the first column.
if vtype == 're':
ix = np.arange(k_re)
ix[0] = re_ix
ix[re_ix] = 0
exog_re = pmodel.exog_re.copy()[:, ix]
# Permute the covariance structure to match the permuted
# design matrix.
params = self.params_object.copy()
cov_re_unscaled = params.cov_re
cov_re_unscaled = cov_re_unscaled[np.ix_(ix, ix)]
params.cov_re = cov_re_unscaled
ru0 = cov_re_unscaled[0, 0]
# Convert dist_low and dist_high to the profile
# parameterization
cov_re = self.scale * cov_re_unscaled
low = (cov_re[0, 0] - dist_low) / self.scale
high = (cov_re[0, 0] + dist_high) / self.scale
elif vtype == 'vc':
re_ix = self.model._vc_names.index(re_ix)
params = self.params_object.copy()
vcomp = self.vcomp
low = (vcomp[re_ix] - dist_low) / self.scale
high = (vcomp[re_ix] + dist_high) / self.scale
ru0 = vcomp[re_ix] / self.scale
# Define the sequence of values to which the parameter of
# interest will be constrained.
if low <= 0:
raise ValueError("dist_low is too large and would result in a "
"negative variance. Try a smaller value.")
left = np.linspace(low, ru0, num_low + 1)
right = np.linspace(ru0, high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free = MixedLMParams(k_fe, k_re, k_vc)
if self.freepat is None:
free.fe_params = np.ones(k_fe)
vcomp = np.ones(k_vc)
mat = np.ones((k_re, k_re))
else:
# If a freepat already has been specified, we add the
# constraint to it.
free.fe_params = self.freepat.fe_params
vcomp = self.freepat.vcomp
mat = self.freepat.cov_re
if vtype == 're':
mat = mat[np.ix_(ix, ix)]
if vtype == 're':
mat[0, 0] = 0
else:
vcomp[re_ix] = 0
free.cov_re = mat
free.vcomp = vcomp
klass = self.model.__class__
init_kwargs = pmodel._get_init_kwds()
if vtype == 're':
init_kwargs['exog_re'] = exog_re
likev = []
for x in rvalues:
model = klass(endog, exog, **init_kwargs)
if vtype == 're':
cov_re = params.cov_re.copy()
cov_re[0, 0] = x
params.cov_re = cov_re
else:
params.vcomp[re_ix] = x
# TODO should use fit_kwargs
rslt = model.fit(start_params=params, free=free,
reml=self.reml, cov_pen=self.cov_pen)._results
likev.append([x * rslt.scale, rslt.llf])
likev = np.asarray(likev)
return likev
class MixedLMResultsWrapper(base.LikelihoodResultsWrapper):
_attrs = {'bse_re': ('generic_columns', 'exog_re_names_full'),
'fe_params': ('generic_columns', 'xnames'),
'bse_fe': ('generic_columns', 'xnames'),
'cov_re': ('generic_columns_2d', 'exog_re_names'),
'cov_re_unscaled': ('generic_columns_2d', 'exog_re_names'),
}
_upstream_attrs = base.LikelihoodResultsWrapper._wrap_attrs
_wrap_attrs = base.wrap.union_dicts(_attrs, _upstream_attrs)
_methods = {}
_upstream_methods = base.LikelihoodResultsWrapper._wrap_methods
_wrap_methods = base.wrap.union_dicts(_methods, _upstream_methods)
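# Hedged usage sketch (illustration only): how the results-level helpers
# defined above are commonly combined.  ``result`` stands for any fitted
# MixedLMResults instance; nothing here is executed at import time.
def _example_results_helpers(result):
    """Pull per-group random effects and profile the first RE variance."""
    # Conditional means of the random effects, one pandas Series per group.
    re_by_group = result.random_effects
    first_group = next(iter(re_by_group))
    intercept_re = re_by_group[first_group]
    # Profile likelihood for the first random-effect variance parameter;
    # column 0 holds the constrained values, column 1 the log-likelihood.
    prof = result.profile_re(0, 're', num_low=3, dist_low=0.1,
                             num_high=3, dist_high=0.1)
    best = prof[np.argmax(prof[:, 1]), 0]
    return intercept_re, best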
| bsd-3-clause |
jat255/seaborn | seaborn/tests/test_utils.py | 11 | 11338 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from . import PlotTestCase
from .. import utils, rcmod
from ..utils import get_dataset_names, load_dataset
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(PlotTestCase):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
    # Test the caching using a temporary file.
    # With Python 3.2+, we could use the tempfile.TemporaryDirectory()
    # context manager instead of this try...finally statement (see the
    # sketch after this function).
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
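# Hedged sketch (not part of the original test suite): the
# tempfile.TemporaryDirectory() variant mentioned in the comment above,
# usable on Python 3.2+ only.
def check_load_cached_dataset_py3(name):
    with tempfile.TemporaryDirectory() as tmpdir:
        ds = load_dataset(name, cache=True, data_home=tmpdir)   # download + cache
        ds2 = load_dataset(name, cache=True, data_home=tmpdir)  # served from cache
        pdt.assert_frame_equal(ds, ds2)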
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
Hbl15/ThinkStats2 | code/hinc.py | 67 | 1494 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
"""Converts dollar amounts to integers."""
try:
return int(s.lstrip('$').replace(',', ''))
except ValueError:
if s == 'Under':
return 0
elif s == 'over':
return np.inf
return None
def ReadData(filename='hinc06.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
data = pandas.read_csv(filename, header=None, skiprows=9)
cols = data[[0, 1]]
res = []
for _, row in cols.iterrows():
label, freq = row.values
freq = int(freq.replace(',', ''))
t = label.split()
low, high = Clean(t[0]), Clean(t[-1])
res.append((high, freq))
df = pandas.DataFrame(res)
# correct the first range
df[0][0] -= 1
# compute the cumulative sum of the freqs
df[2] = df[1].cumsum()
# normalize the cumulative freqs
total = df[2][41]
df[3] = df[2] / total
# add column names
df.columns = ['income', 'freq', 'cumsum', 'ps']
return df
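# Hedged helper (not in the original script): reads an approximate percentile
# boundary off the cumulative probabilities (ps) computed in ReadData.
def PercentileIncome(df, p):
    """Return the upper bound of the first bracket whose cumulative
    probability reaches p, for 0 < p <= 1."""
    rows = df[df.ps >= p]
    return rows.income.iloc[0]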
def main():
df = ReadData()
print(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
higex/qpath | scripts/wsi_bot_codebook4_l1.py | 1 | 4554 | #!/usr/bin/env python2
# Takes a level-0 coding and constructs the level-1 coding, applying the
# same B-o-F approach, but on larger neighborhoods and per subtype.
from __future__ import print_function
import numpy as np
import numpy.linalg
import glob
import argparse as opt
from sklearn.cluster import MiniBatchKMeans
from util.storage import ModelPersistence
from util.explore import sliding_window
__version__ = 0.1
__author__ = 'Vlad Popovici'
def load_image_l0_codes(img_path, encoding_type, subtype, w):
"""
    Loads the l0-coding for a series of images, applies max-pooling to
    construct a local descriptor, and saves the results.
Args:
img_path: string
the images are loaded from a path of the form
<img_path>/encoded_<encoding_type>_l0/<subtype>/
encoding_type: string
see img_path
subtype: string
see img_path
w: int
local neighborhood size (w x w), in coordinates of the l0-image
    Returns:
        xlist: list of 2d arrays (or None if no samples were found), one
            per sample, holding the max-pooled l0 codes reshaped to (nr*nc, k)
"""
# get the IDs for the subtype:
sample_paths = glob.glob(img_path +
'/encoded_{}_l0/'.format(encoding_type) +
subtype + '/*.npz')
# print(img_path + '/encoded_{}_l0/'.format(encoding_type) + subtype + '/*.npz')
if len(sample_paths) == 0:
return None
xlist = list()
for sp in sample_paths:
sid = sp.split('/')[-1].split('_')[0]
# print(sp)
# print(sid)
# read data l0
Xl0 = np.load(sp)['Xl0'] # W x H x K
coords_l0 = np.load(sp)['coords']
wsize_l0 = np.load(sp)['wsize']
r, c, k = Xl0.shape
# work on l0-image
st_r = int(np.floor((r % w) / 2))
st_c = int(np.floor((c % w) / 2))
nr = int(np.floor(r / w))
nc = int(np.floor(c / w))
Xl0_mp = np.zeros((nr, nc, k))
it = sliding_window( (r, c), (w, w), start=(st_r, st_c), step=(w, w) )
for r0, r1, c0, c1 in it:
# max-pooling over W x W
Xl0_mp[int((r0-st_r)/w), int((c0-st_c)/w), :] = Xl0[r0:r1, c0:c1, :].reshape((w*w, k)).max(axis=0)
# save l1-image
dst_file = img_path + '/encoded_{}_l1/'.format(encoding_type) + subtype + '/' + sid + '_l1.npz'
# print(dst_file)
np.savez_compressed(dst_file,
Xl0_maxpool=Xl0_mp,
coords_l0=coords_l0,
coords_l1=np.array([st_r, st_r+nr*w, st_c, st_c+nc*w]),
wsize_l0=wsize_l0,
wsize_l1=np.array([w]))
xlist.append(Xl0_mp.reshape((nr*nc, k)))
return xlist
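# Hedged illustration (not used by the pipeline): when r and c are exact
# multiples of w, the sliding-window max-pooling loop above is equivalent to
# this reshape-based formulation.
def _maxpool_nonoverlapping(X, w):
    """Max-pool an (r, c, k) code image over non-overlapping w x w windows."""
    r, c, k = X.shape
    nr, nc = r // w, c // w
    Xc = X[:nr * w, :nc * w, :]          # crop to a multiple of w
    Xc = Xc.reshape(nr, w, nc, w, k)     # expose the w x w blocks
    return Xc.max(axis=(1, 3))           # pool each block, per channel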
def main():
p = opt.ArgumentParser(description="""
Builds an L0-maxpool representation from l0 local features and learns
an l1 codebook.
""")
p.add_argument('data_folder', action='store',
help='path to a folder containing all the files for the subtypes')
p.add_argument('encoding_type', action='store', choices=['sda', 'cnn'],
help='specifies the encoding type for the data file (mainly for file name inference')
p.add_argument('subtype', action='store', choices=['a','b','c','d','e','outlier'],
help='subtype')
p.add_argument('codebook_file', action='store', help='resulting l1-codebook model file name')
p.add_argument('codebook_size', action='store', help='codebook size', type=int)
p.add_argument('-v', '--verbose', action='store_true', help='verbose?')
args = p.parse_args()
x = load_image_l0_codes(args.data_folder, args.encoding_type, args.subtype, 16)
Z = np.vstack(x)
rng = np.random.RandomState(123456)
vq = MiniBatchKMeans(n_clusters=args.codebook_size, random_state=rng, batch_size=500,
compute_labels=True, verbose=False) # vector quantizer
vq.fit(Z)
# compute the average distance and std.dev. of the points in each cluster:
avg_dist = np.zeros(args.codebook_size)
sd_dist = np.zeros(args.codebook_size)
for k in range(0, args.codebook_size):
d = numpy.linalg.norm(Z[vq.labels_ == k, :] - vq.cluster_centers_[k, :], axis=1)
avg_dist[k] = d.mean()
sd_dist[k] = d.std()
with ModelPersistence(args.codebook_file, 'c', format='pickle') as d:
d['codebook'] = vq
d['avg_dist_to_centroid'] = avg_dist
d['stddev_dist_to_centroid'] = sd_dist
return 0
if __name__ == '__main__':
main() | mit |
vshtanko/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
frank-tancf/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
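# Hedged usage sketch (standalone illustration): timing a single sampler
# directly, outside the option-parsing driver below.
def _example_single_benchmark():
    sampler = lambda n_population, n_samples: sample_without_replacement(
        n_population, n_samples, method="auto")
    t = bench_sample(sampler, n_population=100000, n_samples=1000)
    print("auto method: %.6f s" % t)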
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
SU-ECE-17-7/hotspotter | _graveyard/_broken/MainWindow.py | 2 | 17501 | from PyQt4 import QtCore, QtGui
from PyQt4.Qt import QObject, pyqtSignal, QFileDialog
from MainSkel import Ui_mainSkel
import multiprocessing
from PyQt4.Qt import QMainWindow, QTableWidgetItem, QMessageBox, \
QAbstractItemView, QWidget, Qt, pyqtSlot, pyqtSignal, \
QStandardItem, QStandardItemModel, QString, QObject
from _tpl.other.matplotlibwidget import MatplotlibWidget
# http://stackoverflow.com/questions/2312210/window-icon-of-exe-in-pyqt4
#-------------------------------------------
def gui_log(fn):
'log what happens in the GUI for debugging'
def gui_log_wrapper(hsgui, *args, **kwargs):
try:
function_name = fn.func_name
into_str = 'In hsgui.'+function_name
outo_str = 'Out hsgui.'+function_name+'\n'
hsgui.logdbgSignal.emit(into_str)
ret = fn(hsgui, *args, **kwargs)
hsgui.logdbgSignal.emit(outo_str)
return ret
except Exception as ex:
import traceback
logmsg('\n\n *!!* HotSpotter GUI Raised Exception: '+str(ex))
logmsg('\n\n *!!* HotSpotter GUI Exception Traceback: \n\n'+traceback.format_exc())
return gui_log_wrapper
class EditPrefWidget(QWidget):
'The Settings Pane; Subclass of Main Windows.'
def __init__(epw, fac):
super( EditPrefWidget, epw ).__init__()
epw.pref_skel = Ui_editPrefSkel()
epw.pref_skel.setupUi(epw)
epw.pref_model = None
epw.pref_skel.redrawBUT.clicked.connect(fac.redraw)
epw.pref_skel.defaultPrefsBUT.clicked.connect(fac.default_prefs)
epw.pref_skel.unloadFeaturesAndModelsBUT.clicked.connect(fac.unload_features_and_models)
@pyqtSlot(Pref, name='populatePrefTreeSlot')
def populatePrefTreeSlot(epw, pref_struct):
'Populates the Preference Tree Model'
logdbg('Bulding Preference Model of: '+repr(pref_struct))
epw.pref_model = pref_struct.createQPreferenceModel()
logdbg('Built: '+repr(epw.pref_model))
epw.pref_skel.prefTreeView.setModel(epw.pref_model)
epw.pref_skel.prefTreeView.header().resizeSection(0,250)
class HotSpotterMainWindow(QtGui.QMainWindow):
populateChipTblSignal = pyqtSignal(list, list, list, list)
def __init__(self, hs=None):
super(HotSpotterMainWindow, self).__init__()
self.hs = None
self.ui=Ui_mainSkel()
self.ui.setupUi(self)
self.show()
        if hs is not None:
self.connect_api(hs)
def connect_api(self, hs):
print('[win] connecting api')
self.hs = hs
hsgui.epw = EditPrefWidget(fac)
hsgui.plotWidget = MatplotlibWidget(hsgui.main_skel.centralwidget)
hsgui.plotWidget.setObjectName(_fromUtf8('plotWidget'))
hsgui.main_skel.root_hlayout.addWidget(hsgui.plotWidget)
hsgui.prev_tbl_item = None
hsgui.prev_cid = None
hsgui.prev_gid = None
hsgui.non_modal_qt_handles = []
def connectSignals(hsgui, fac):
'Connects GUI signals to Facade Actions'
logdbg('Connecting GUI >> to >> Facade')
# Base Signals
hsgui.selectCidSignal.connect(fac.selc)
hsgui.selectGidSignal.connect(fac.selg)
hsgui.renameChipIdSignal.connect(fac.rename_cid)
hsgui.changeChipPropSignal.connect(fac.change_chip_prop)
hsgui.logdbgSignal.connect(fac.logdbgSlot)
# SKEL SIGNALS
main_skel = hsgui.main_skel
# Widget
hsgui.main_skel.fignumSPIN.valueChanged.connect(
fac.set_fignum)
# File
main_skel.actionOpen_Database.triggered.connect(
fac.open_db)
main_skel.actionSave_Database.triggered.connect(
fac.save_db)
main_skel.actionImport_Images.triggered.connect(
fac.import_images)
main_skel.actionQuit.triggered.connect(
hsgui.close)
# Actions
main_skel.actionQuery.triggered.connect(
fac.query)
main_skel.actionAdd_ROI.triggered.connect(
fac.add_chip)
main_skel.actionReselect_Orientation.triggered.connect(
fac.reselect_orientation)
main_skel.actionReselect_ROI.triggered.connect(
fac.reselect_roi)
main_skel.actionRemove_Chip.triggered.connect(
fac.remove_cid)
main_skel.actionNext.triggered.connect(
fac.select_next)
# Options
main_skel.actionTogEll.triggered.connect(
fac.toggle_ellipse)
main_skel.actionTogPts.triggered.connect(
fac.toggle_points)
main_skel.actionTogPlt.triggered.connect(
hsgui.setPlotWidgetVisibleSlot)
main_skel.actionPreferences.triggered.connect(
hsgui.epw.show )
# Help
main_skel.actionView_Documentation.triggered.connect(
fac.view_documentation)
main_skel.actionHelpCMD.triggered.connect(
lambda:hsgui.msgbox('Command Line Help', cmd_help))
main_skel.actionHelpWorkflow.triggered.connect(
lambda:hsgui.msgbox('Workflow HOWTO', workflow_help))
main_skel.actionHelpTroubles.triggered.connect(
lambda:hsgui.msgbox('Troubleshooting Help', troubles_help))
main_skel.actionWriteLogs.triggered.connect(
fac.write_logs)
# Convinience
main_skel.actionOpen_Source_Directory.triggered.connect(
fac.vd)
main_skel.actionOpen_Data_Directory.triggered.connect(
fac.vdd)
main_skel.actionOpen_Internal_Directory.triggered.connect(
fac.vdi)
main_skel.actionConvertImage2Chip.triggered.connect(
fac.convert_all_images_to_chips)
main_skel.actionBatch_Change_Name.triggered.connect(
fac._quick_and_dirty_batch_rename)
main_skel.actionAdd_Metadata_Property.triggered.connect(
fac.add_new_prop)
main_skel.actionAssign_Matches_Above_Threshold.triggered.connect(
fac.match_all_above_thresh)
main_skel.actionIncrease_ROI_Size.triggered.connect(
fac.expand_rois)
# Experiments
main_skel.actionMatching_Experiment.triggered.connect(
fac.run_matching_experiment)
main_skel.actionName_Consistency_Experiment.triggered.connect(
fac.run_name_consistency_experiment)
#
# Gui Components
# Tables Widgets
main_skel.chip_TBL.itemClicked.connect(
hsgui.chipTableClickedSlot)
main_skel.chip_TBL.itemChanged.connect(
hsgui.chipTableChangedSlot)
main_skel.image_TBL.itemClicked.connect(
hsgui.imageTableClickedSlot)
main_skel.res_TBL.itemChanged.connect(
hsgui.resultTableChangedSlot)
# Tab Widget
# This signal slot setup is very bad. Needs rewrite
main_skel.tablesTabWidget.currentChanged.connect(
fac.change_view)
main_skel.chip_TBL.sortByColumn(0, Qt.AscendingOrder)
main_skel.res_TBL.sortByColumn(0, Qt.AscendingOrder)
main_skel.image_TBL.sortByColumn(0, Qt.AscendingOrder)
@pyqtSlot(name='setPlotWidgetVisible')
def setPlotWidgetVisibleSlot(hsgui, bit=None): #None = toggle
if hsgui.plotWidget != None:
logdbg('Disabling Plot Widget')
if bit is None: bit = not hsgui.plotWidget.isVisible()
was_visible = hsgui.plotWidget.setVisible(bit)
if was_visible != bit:
if bit:
hsgui.main_skel.fignumSPIN.setValue(0)
else:
hsgui.main_skel.fignumSPIN.setValue(1)
#hsgui.setFignumSignal.emit(int(1 - bit)) # plotwidget fignum = 0
# Internal GUI Functions
def populate_tbl_helper(hsgui, tbl, col_headers, col_editable, row_list, row2_data_tup ):
#tbl = main_skel.chip_TBL
hheader = tbl.horizontalHeader()
sort_col = hheader.sortIndicatorSection()
sort_ord = hheader.sortIndicatorOrder()
tbl.sortByColumn(0, Qt.AscendingOrder) # Basic Sorting
prevBlockSignals = tbl.blockSignals(True)
tbl.clear()
tbl.setColumnCount(len(col_headers))
tbl.setRowCount(len(row_list))
tbl.verticalHeader().hide()
tbl.setHorizontalHeaderLabels(col_headers)
tbl.setSelectionMode( QAbstractItemView.SingleSelection )
tbl.setSelectionBehavior( QAbstractItemView.SelectRows)
tbl.setSortingEnabled(False)
for row in iter(row_list):
data_tup = row2_data_tup[row]
for col, data in enumerate(data_tup):
item = QTableWidgetItem()
try:
int_data = int(data)
item.setData(Qt.DisplayRole, int_data)
except ValueError: # for strings
item.setText(str(data))
except TypeError: #for lists
item.setText(str(data))
item.setTextAlignment(Qt.AlignHCenter)
if col_editable[col]: item.setFlags(item.flags() | Qt.ItemIsEditable)
else: item.setFlags(item.flags() ^ Qt.ItemIsEditable)
tbl.setItem(row, col, item)
tbl.setSortingEnabled(True)
tbl.sortByColumn(sort_col,sort_ord) # Move back to old sorting
tbl.show()
tbl.blockSignals(prevBlockSignals)
@pyqtSlot(dict, name='updateDBStatsSlot')
@gui_log
def updateDBStatsSlot(hsgui, stats):
hsgui.setWindowTitle(stats['title'])
def updateSelSpinsSlot(hsgui, cid, gid):
hsgui.prev_cid = cid
hsgui.prev_gid = gid
hsgui.main_skel.sel_cid_SPIN.setValue(cid)
hsgui.main_skel.sel_gid_SPIN.setValue(gid)
def redrawGuiSlot(hsgui):
hsgui.show()
if hsgui.plotWidget != None and\
hsgui.plotWidget.isVisible():
hsgui.plotWidget.show()
hsgui.plotWidget.draw()
def updateStateLabelSlot(hsgui, state):
hsgui.main_skel.state_LBL.setText(state)
@pyqtSlot(list, list, list, list, name='populateChipTblSlot')
def populateChipTblSlot(hsgui, col_headers, col_editable, row_list, row2_data_tup):
hsgui.populate_tbl_helper(hsgui.main_skel.chip_TBL, col_headers, col_editable, row_list, row2_data_tup)
@pyqtSlot(list, list, list, list, name='populateImageTblSlot')
def populateImageTblSlot(hsgui, col_headers, col_editable, row_list, row2_data_tup):
hsgui.populate_tbl_helper(hsgui.main_skel.image_TBL, col_headers, col_editable, row_list, row2_data_tup)
@pyqtSlot(list, list, list, list, name='populateResultTblSlot')
def populateResultTblSlot(hsgui, col_headers, col_editable, row_list, row2_data_tup):
hsgui.populate_tbl_helper(hsgui.main_skel.res_TBL, col_headers, col_editable, row_list, row2_data_tup)
@gui_log
def chipTableChangedSlot(hsgui, item):
'A Chip had a data member changed '
hsgui.logdbgSignal.emit('chip table changed')
sel_row = item.row()
sel_col = item.column()
sel_cid = int(hsgui.main_skel.chip_TBL.item(sel_row,0).text())
new_val = str(item.text()).replace(',',';;')
header_lbl = str(hsgui.main_skel.chip_TBL.horizontalHeaderItem(sel_col).text())
hsgui.selectCidSignal.emit(sel_cid)
# Rename the chip!
if header_lbl == 'Chip Name':
hsgui.renameChipIdSignal.emit(new_val, sel_cid)
# Change the user property instead
else:
hsgui.changeChipPropSignal.emit(header_lbl, new_val, sel_cid)
@gui_log
def resultTableChangedSlot(hsgui, item):
'A Chip was Renamed in Result View'
hsgui.logdbgSignal.emit('result table changed')
sel_row = item.row()
sel_cid = int(hsgui.main_skel.res_TBL.item(sel_row,1).text())
new_name = str(item.text())
hsgui.renameChipIdSignal.emit(new_name, int(sel_cid))
def imageTableClickedSlot(hsgui, item):
'Select Image ID'
if item == hsgui.prev_tbl_item: return
hsgui.prev_tbl_item = item
sel_row = item.row()
sel_gid = int(hsgui.main_skel.image_TBL.item(sel_row,0).text())
hsgui.selectGidSignal.emit(sel_gid)
def chipTableClickedSlot(hsgui, item):
'Select Chip ID'
hsgui.logdbgSignal.emit('chip table clicked')
if item == hsgui.prev_tbl_item: return
hsgui.prev_tbl_item = item
sel_row = item.row()
sel_cid = int(hsgui.main_skel.chip_TBL.item(sel_row,0).text())
hsgui.selectCidSignal.emit(sel_cid)
def update_image_table(self):
uim.populateImageTblSignal.connect( uim.hsgui.populateImageTblSlot )
pass
def select_tab(uim, tabname, block_draw=False):
logdbg('Selecting the '+tabname+' Tab')
if block_draw:
prevBlock = uim.hsgui.main_skel.tablesTabWidget.blockSignals(True)
tab_index = uim.tab_order.index(tabname)
uim.selectTabSignal.emit(tab_index)
if block_draw:
uim.hsgui.main_skel.tablesTabWidget.blockSignals(prevBlock)
def get_gui_figure(uim):
'returns the matplotlib.pyplot.figure'
if uim.hsgui != None and uim.hsgui.plotWidget != None:
fig = uim.hsgui.plotWidget.figure
fig.show = lambda: uim.hsgui.plotWidget.show() #HACKY HACK HACK
return fig
return None
@func_log
def redraw_gui(uim):
if not uim.hsgui is None and uim.hsgui.isVisible():
uim.redrawGuiSignal.emit()
# --- UIManager things that deal with the GUI Through Signals
@func_log
def populate_chip_table(uim):
#tbl = uim.hsgui.main_skel.chip_TBL
cm = uim.hs.cm
col_headers = ['Chip ID', 'Chip Name', 'Name ID', 'Image ID', 'Other CIDS']
col_editable = [ False , True , False , False , False ]
# Add User Properties to headers
col_headers += cm.user_props.keys()
col_editable += [True for key in cm.user_props.keys()]
# Create Data List
cx_list = cm.get_valid_cxs()
data_list = [None]*len(cx_list)
row_list = range(len(cx_list))
for (i,cx) in enumerate(cx_list):
# Get Indexing Data
cid = cm.cx2_cid[cx]
gid = cm.cx2_gid(cx)
nid = cm.cx2_nid(cx)
# Get Useful Data
name = cm.cx2_name(cx)
other_cxs_ = setdiff1d(cm.cx2_other_cxs([cx])[0], cx)
other_cids = cm.cx2_cid[other_cxs_]
# Get User Data
cm.user_props.keys()
user_data = [cm.user_props[key][cx] for key in
cm.user_props.iterkeys()]
# Pack data to sent to Qt
data_list[i] = (cid, name, nid, gid, other_cids)+tuple(user_data)
#(cid, name, nid, gid, other_cids, *user_data)
uim.populateChipTblSignal.emit(col_headers, col_editable, row_list, data_list)
@func_log
def populate_image_table(uim):
col_headers = ['Image ID', 'Image Name', 'Chip IDs', 'Chip Names']
col_editable = [ False , False , False , False ]
# Populate table with valid image indexes
cm, gm = uim.hs.get_managers('cm','gm')
gx_list = gm.get_valid_gxs()
data_list = [None]*len(gx_list)
row_list = range(len(gx_list))
for (i,gx) in enumerate(gx_list):
gid = gm.gx2_gid[gx]
gname = gm.gx2_gname[gx]
cid_list = gm.gx2_cids(gx)
name_list = str([cm.cid2_(cid, 'name') for cid in cid_list])
data_list[i] = (gid, gname, cid_list, name_list)
uim.populateImageTblSignal.emit(col_headers, col_editable, row_list, data_list)
@func_log
def populate_result_table(uim):
col_headers = ['Rank', 'Chip ID', 'Chip Name', 'score']
col_editable = [False , False , True , False ]
# Check to see if results exist
res = uim.sel_res
if res is None:
logdbg('Not populating. selected results are None.')
return None
logmsg(res)
gm, cm, am = uim.hs.get_managers('gm','cm','am')
dynargs =\
('cid', 'name' )
(qcid , qname ) = res.qcid2_(*dynargs)
(tcid , tname , tscore ) = res.tcid2_(*dynargs+('score',))
num_results = len(tcid)
data_list = [None]*(num_results+1)
row_list = range(num_results+1)
data_list[0] = [0, qcid, qname, 'Queried Chip']
for (ix, (cid, name, score)) in enumerate(zip(tcid, tname, tscore)):
rank = ix+1
data_list[ix+1] = (rank, cid, name, score)
uim.populateResultTblSignal.emit(col_headers, col_editable, row_list, data_list)
def populate_algo_settings(uim):
logdbg('Populating the Preference Tree... Sending Signal')
uim.populatePrefTreeSignal.emit(uim.hs.all_pref)
def set_fignum(uim, fignum):
if uim.hsgui != None:
prevBlockSignals = uim.hsgui.main_skel.fignumSPIN.blockSignals(True)
uim.setfignumSignal.emit(fignum)
uim.hsgui.main_skel.fignumSPIN.blockSignals(prevBlockSignals)
if __name__ == '__main__':
import sys
multiprocessing.freeze_support()
def test():
app = QtGui.QApplication(sys.argv)
main_win = HotSpotterMainWindow()
app.setActiveWindow(main_win)
sys.exit(app.exec_())
test()
| apache-2.0 |