repo_name | path | copies | size | content | license
---|---|---|---|---|---|
CassioAmador/profile_tcabr | visualization_tools/test_phase.py | 1 | 2768 | """
Test phase and group delay for both bands.
TODO: Show overlap with different colors
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import numpy as np
import scipy
import sys
sys.path.insert(0, './../src/')
import proc_profile_bottollier as ppb
# Use the shot number given on the command line; otherwise fall back to
# the value stored in shot_number.txt.
if len(sys.argv) > 1:
    shot_number = int(sys.argv[1])
else:
    shot_number = int(open('shot_number.txt', 'r').read())
shot = ppb.ProcProfile(shot_number)
sweeps_average = 8
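# Processing pipeline (inferred from the method names and plot labels below):
# build a reference group delay from clustered sweeps, find the frequency
# overlap between the two bands, evaluate the group delay for one plasma
# sweep window, initialize it, and finally evaluate the phase.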
shot.reference_gd(all_shot=1, sw_clustersize=sweeps_average)
shot.eval_freq_overlap()
shot.plasma_gd(5000, sweeps_average, 1)
shot.eval_gd_overlap()
shot.init_gd()
shot.eval_phase()
fig = plt.figure()
fig.subplots_adjust(bottom=0.25)
ax1 = fig.add_subplot(111)
lns1 = ax1.plot(shot.freqs, shot.gd, marker='.',
linestyle='-', color='k', label="gd")
lns3 = ax1.plot(shot.freqs_over, shot.gd_over, marker='o',
linestyle='-', color='r', label="gd_overlap")
lns4 = ax1.plot(shot.freq_ini, shot.ini_t, marker='o',
linestyle='-', color='g', label="gd_init")
plt.xlabel("probing freq (GHz)")
plt.ylabel("group delay (ns)")
plt.title("# %s - time: %s ms" % (shot.shot, shot.sweep2time(shot.sweep_cur)))
ax1.set_ylim(0, 2)
ax2 = ax1.twinx()
lns2 = ax2.plot(shot.freqs, shot.phi, marker='.',
linestyle='-', color='b', label="phase")
plt.ylabel("phase (rad)")
plt.xlim(0, 40)
# point_over=np.where(shot.freqs==shot.freqs_over[0])[0][0]
#lns4= ax1.axvline(shot.freqs[point_over],color='r',label="band overlap")
# lns3=ax1.axhline(shot.gd[point_over],color='r')
# combine the legend entries from both y-axes
lns = lns1 + lns2 + lns3 + lns4
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc=2)
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.13, 0.1, 0.77, 0.03], axisbg=axcolor)  # 'axisbg' was renamed 'facecolor' in matplotlib 2.0
sweep = Slider(axfreq, 'Sweep', 1, len(shot.points) - 1 -
sweeps_average, valinit=5000, valfmt='%1.f')
def update(val):
shot.plasma_gd(int(sweep.val), sweeps_average, all_shot=1)
shot.eval_gd_overlap()
shot.init_gd()
shot.eval_phase()
lns1[0].set_ydata(shot.gd)
lns2[0].set_ydata(shot.phi)
lns3[0].set_ydata(shot.gd_over)
lns4[0].set_ydata(shot.ini_t)
# lns3.set_ydata(shot.gd[point_over])
ax2.set_ylim(min(shot.phi), max(shot.phi))
ax1.set_title("# %s - time: %.3f ms" %
(shot.shot, shot.sweep2time(shot.sweep_cur)))
fig.canvas.draw_idle()
sweep.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sweep.reset()
button.on_clicked(reset)
# plt.show()
| mit |
zihua/scikit-learn | sklearn/externals/joblib/__init__.py | 10 | 5100 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting
  arbitrary objects containing large data is hard. Using
  joblib's caching mechanism avoids hand-written persistence and
  implicitly links the file on disk to the execution context of
  the original Python object. As a result, joblib's persistence is
  good for resuming an application state or computational job, e.g.
  after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives to easily define logging and
   display streams, and provide a way of compiling a report.
   We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
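   A rough sketch of the dump/load round trip (the file path below is only
   illustrative and is not part of the original doctests)::

      >>> from sklearn.externals.joblib import dump, load
      >>> _ = dump(a, '/tmp/joblib/vander.pkl')  # doctest: +SKIP
      >>> a2 = load('/tmp/joblib/vander.pkl')  # doctest: +SKIP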
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.10.2'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| bsd-3-clause |
ngoix/OCRF | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | 39945 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise; otherwise the warning gets ignored in the
    # later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one every 10 iterations (20-100)
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
    # EstimatorClass is the class itself, not an instance, so compare classes.
    if EstimatorClass is GradientBoostingClassifier:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
HighEnergyDataScientests/bnpcompetition | feature_analysis/correlation.py | 1 | 1843 | # -----------------------------------------------------------------------------
# Name: correlation
# Purpose: Calculate correlations and covariance
#
#
# -----------------------------------------------------------------------------
"""
Calculate correlations and covariance
"""
import pandas as pd
import numpy as np
import xgboost as xgb
import operator
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import matplotlib
matplotlib.use("Agg") # Needed to save figures
import matplotlib.pyplot as plt
# --- Import data ---
print("## Loading Data")
train = pd.read_csv('../inputs/train.csv')
# --- Process data ---
print("## Data Processing")
# Define parameters
output_col_name = "target"
id_col_name = "ID"
train = train.drop(id_col_name, axis=1)
# --- Calculate matrices and save to csv ---
print("## Calculating matrices")
print(" - Pearson correlation matrix")
correlation_p = train.corr() # Pearson method
correlation_p.to_csv('stats/correlation_matrix_pearson.csv')
# print(" - Kendall Tau correlation matrix")
# correlation_k = train.corr(method='kendall') # Kendall Tau
# correlation_k.to_csv('stats/correlation_matrix_kendall.csv')
print(" - Spearman correlation matrix")
correlation_s = train.corr(method='spearman') # Spearman
correlation_s.to_csv('stats/correlation_matrix_spearman.csv')
covariance = train.cov()
covariance.to_csv('stats/covariance_matrix.csv')
# --- Plot matrices ---
print("## Plotting")
plt.matshow(correlation_p)
plt.savefig('stats/correlation_matrix_pearson.png')
plt.clf()
# plt.matshow(correlation_k)
# plt.savefig('stats/correlation_matrix_kendall.png')
# plt.clf()
plt.matshow(correlation_s)
plt.savefig('stats/correlation_matrix_spearman.png')
plt.clf()
plt.matshow(covariance)
plt.savefig('stats/covariance_matrix.png')
plt.clf() | apache-2.0 |
keras-team/autokeras | docs/py/structured_data_regression.py | 1 | 7361 | """shell
pip install autokeras
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
import autokeras as ak
"""
## A Simple Example
The first step is to prepare your data. Here we use the [California housing
dataset](
https://scikit-learn.org/stable/datasets/index.html#california-housing-dataset)
as an example.
"""
house_dataset = fetch_california_housing()
df = pd.DataFrame(
np.concatenate(
(house_dataset.data, house_dataset.target.reshape(-1, 1)), axis=1
),
columns=house_dataset.feature_names + ["Price"],
)
train_size = int(df.shape[0] * 0.9)
df[:train_size].to_csv("train.csv", index=False)
df[train_size:].to_csv("eval.csv", index=False)
train_file_path = "train.csv"
test_file_path = "eval.csv"
"""
The second step is to run the
[StructuredDataRegressor](/structured_data_regressor).
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.
"""
# Initialize the structured data regressor.
reg = ak.StructuredDataRegressor(
overwrite=True, max_trials=3
) # It tries 3 different models.
# Feed the structured data regressor with training data.
reg.fit(
# The path to the train.csv file.
train_file_path,
# The name of the label column.
"Price",
epochs=10,
)
# Predict with the best model.
predicted_y = reg.predict(test_file_path)
# Evaluate the best model with testing data.
print(reg.evaluate(test_file_path, "Price"))
"""
## Data Format
The AutoKeras StructuredDataRegressor is quite flexible for the data format.
The example above shows how to use the CSV files directly. Besides CSV files,
it also supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The
data should be two-dimensional with numerical or categorical values.
For the regression targets, it should be a vector of numerical values.
AutoKeras accepts numpy.ndarray, pandas.DataFrame, or pandas.Series.
The following examples show how the data can be prepared with numpy.ndarray,
pandas.DataFrame, and tensorflow.data.Dataset.
"""
# x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train)) # pandas.DataFrame
y_train = x_train.pop("Price")
print(type(y_train)) # pandas.Series
# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train)) # pandas.DataFrame
# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train)) # numpy.ndarray
print(type(y_train)) # numpy.ndarray
# Preparing testing data.
x_test = pd.read_csv(test_file_path)
y_test = x_test.pop("Price")
# It tries 10 different models.
reg = ak.StructuredDataRegressor(max_trials=3, overwrite=True)
# Feed the structured data regressor with training data.
reg.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = reg.predict(x_test)
# Evaluate the best model with testing data.
print(reg.evaluate(x_test, y_test))
"""
The following code shows how to convert numpy.ndarray to tf.data.Dataset.
"""
train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test))
reg = ak.StructuredDataRegressor(max_trials=3, overwrite=True)
# Feed the tensorflow Dataset to the regressor.
reg.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = reg.predict(test_set)
# Evaluate the best model with testing data.
print(reg.evaluate(test_set))
"""
You can also specify the column names and types for the data as follows. The
`column_names` argument is optional if the training data already has column names,
e.g. pandas.DataFrame, CSV file. Any column whose type is not specified will
be inferred from the training data.
"""
# Initialize the structured data regressor.
reg = ak.StructuredDataRegressor(
column_names=[
"MedInc",
"HouseAge",
"AveRooms",
"AveBedrms",
"Population",
"AveOccup",
"Latitude",
"Longitude",
],
column_types={"MedInc": "numerical", "Latitude": "numerical"},
max_trials=10, # It tries 10 different models.
overwrite=True,
)
"""
## Validation Data
By default, AutoKeras uses the last 20% of the training data as validation data. As
shown in the example below, you can use `validation_split` to specify the
percentage.
"""
reg.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=10,
)
"""
You can also use your own validation set
instead of splitting it from the training data with `validation_data`.
"""
split = 500
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
reg.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=10,
)
"""
## Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/#automodel-class) instead of
[StructuredDataRegressor](/structured_data_regressor). You can configure the
[StructuredDataBlock](/block/#structureddatablock-class) for some high-level
configurations, e.g., `categorical_encoding` for whether to use the
[CategoricalToNumerical](/block/#categoricaltonumerical-class). You can also
leave these arguments unspecified, which lets the different choices be
tuned automatically. See the following example for details.
"""
input_node = ak.StructuredDataInput()
output_node = ak.StructuredDataBlock(categorical_encoding=True)(input_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=3
)
reg.fit(x_train, y_train, epochs=10)
"""
The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph whose edges are blocks and whose nodes are
intermediate outputs of blocks. You add an edge from `input_node` to
`output_node` with `output_node = ak.[some_block]([block_args])(input_node)`.
You can also use more fine-grained blocks to customize the search space
even further. See the following example.
"""
input_node = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node)
output_node = ak.DenseBlock()(output_node)
output_node = ak.RegressionHead()(output_node)
reg = ak.AutoModel(
inputs=input_node, outputs=output_node, max_trials=3, overwrite=True
)
reg.fit(x_train, y_train, epochs=10)
"""
You can also export the best model found by AutoKeras as a Keras Model.
"""
model = reg.export_model()
model.summary()
# numpy array in object (mixed type) is not supported.
# you need convert it to unicode or float first.
model.predict(x_train)
"""
## Reference
[StructuredDataRegressor](/structured_data_regressor),
[AutoModel](/auto_model/#automodel-class),
[StructuredDataBlock](/block/#structureddatablock-class),
[DenseBlock](/block/#denseblock-class),
[StructuredDataInput](/node/#structureddatainput-class),
[RegressionHead](/block/#regressionhead-class),
[CategoricalToNumerical](/block/#categoricaltonumerical-class).
"""
| apache-2.0 |
dicortazar/xen-code-review-analysis | data-analysis/xen_analysis_ps_datetime.py | 1 | 5096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo <[email protected]>
# Santiago Dueñas <[email protected]>
#
"""
Xen Code Review Kibana-dashboard builder
This script aims at building a Kibana dashboard focused on
several aspects of interest to the Xen community.
This builds the following panels:
* Time-focused panel at the level of patch series
* People/Organizations panel at the level of patches/comments/reviews
* Backlog/commits panel at the level of patch series
The time focused panel plays with the following data:
* Filters:
* Patches per patch series
* Loops per patch series
* Evolutionary charts:
* Time to merge
* Time to commit
* Time to re-work a patch series
* Cycle time
* Time to first review
"""
from __future__ import absolute_import
import argparse
import pandas
from mappings import PS_TIMEFOCUSED_MAPPING
from queries import (QUERY_PATCH_SERIE,
QUERY_TIME2MERGE,
QUERY_TIME2COMMIT)
from utils import (create_mysql_connection,
create_elasticsearch_connection,
execute_mysql_query,
read_config_file,
to_dict)
XEN_INDEX = 'xen-patchseries-timefocused'
DF_PATCH_COLUMNS = ["patchserie_id", "message_id", "subject", "sender",
"sender_domain", "sent_date", "num_patches",
"num_versions", "num_comments", "num_commenters"]
DF_TIME2MERGE_COLUMNS = ["patchserie_id", "time2merge", "sent_date", "mergetime"]
DF_TIME2COMMIT_COLUMNS = ["patchserie_id", "time2commit",
"lastcommentdate", "committime"]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', dest='config_file',
default='./settings')
parser.add_argument('-i', '--index', dest='xen_index',
default=XEN_INDEX)
args = parser.parse_args()
config = read_config_file(args.config_file)
cursor = connect_to_mysql(**config['mysql'])
dfs = load_dataframes(cursor)
data = calculate(dfs)
es_conn = connect_to_elasticsearch(**config['elasticsearch'])
write_to_elasticsearch(es_conn, data, args.xen_index)
def load_dataframes(cursor):
data_patchserie = list(execute_mysql_query(cursor, QUERY_PATCH_SERIE))
data_time2merge = list(execute_mysql_query(cursor, QUERY_TIME2MERGE))
data_time2commit = list(execute_mysql_query(cursor, QUERY_TIME2COMMIT))
dfs = {
'patchseries' : pandas.DataFrame(data_patchserie, columns=DF_PATCH_COLUMNS),
'time2merge' : pandas.DataFrame(data_time2merge, columns=DF_TIME2MERGE_COLUMNS),
'time2commit' : pandas.DataFrame(data_time2commit, columns=DF_TIME2COMMIT_COLUMNS)
}
return dfs
def calculate(dfs):
patchseries = dfs['patchseries']
time2merge = dfs['time2merge']
time2commit = dfs['time2commit']
patchseries_df = pandas.merge(patchseries, time2merge,
on='patchserie_id', how='left')
patchseries_df = pandas.merge(patchseries_df, time2commit,
on='patchserie_id', how='left')
patchseries_df = patchseries_df.fillna(-1)
    # Convert time2commit and time2merge from seconds to days
    patchseries_df['time2commit'] = (patchseries_df['time2commit'] / 3600.0) / 24.0
    patchseries_df['time2merge'] = (patchseries_df['time2merge'] / 3600.0) / 24.0
return patchseries_df
def connect_to_mysql(**params):
user = params['user']
password = params['password']
host = params['host']
db = params['code_review_db']
cursor = create_mysql_connection(user, password, host, db)
return cursor
def connect_to_elasticsearch(**params):
user = params['user']
password = params['password']
url = params['url']
conn = create_elasticsearch_connection(url, user, password)
return conn
def write_to_elasticsearch(conn, data, xen_index):
conn.indices.create(index=xen_index, body=PS_TIMEFOCUSED_MAPPING,
ignore=400)
columns = data.columns.values.tolist()
for row in data.itertuples():
uniq_id = row[0]
doc = to_dict(row, columns)
_ = conn.index(index=xen_index,
doc_type='patchserie',
id=uniq_id, body=doc)
if __name__ == '__main__':
main()
| artistic-2.0 |
shareactorIO/pipeline | source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/SkFlow_DEPRECATED/text_classification_character_cnn.py | 6 | 3495 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using convolutional networks over characters
for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 100
char_processor = skflow.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(char_processor.fit_transform(X_train)))
X_test = np.array(list(char_processor.transform(X_test)))
### Models
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(X, y):
"""Character level convolutional neural network model to predict classes."""
byte_list = tf.reshape(skflow.ops.one_hot_matrix(X, 256),
[-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = skflow.ops.conv2d(byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convlution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
return skflow.models.logistic_regression(pool2, y)
classifier = skflow.TensorFlowEstimator(model_fn=char_cnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train in increments of 100 steps & predict on the test set.
while True:
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print("Accuracy: %f" % score)
| apache-2.0 |
pauldeng/nilmtk | nilmtk/metrics.py | 5 | 13373 | '''Metrics to compare disaggregation performance against ground truth
data.
All metrics functions have the same interface. Each function takes
`predictions` and `ground_truth` parameters. Both of which are
nilmtk.MeterGroup objects. Each function returns one of two types:
either a pd.Series or a single float. Most functions return a
pd.Series where each index element is a meter instance int or a tuple
of ints for MeterGroups.
Notation
--------
Below is the notation used to mathematically define each metric.
:math:`T` - number of time slices.
:math:`t` - a time slice.
:math:`N` - number of appliances.
:math:`n` - an appliance.
:math:`y^{(n)}_t` - ground truth power of appliance :math:`n` in time slice :math:`t`.
:math:`\\hat{y}^{(n)}_t` - estimated power of appliance :math:`n` in time slice :math:`t`.
:math:`x^{(n)}_t` - ground truth state of appliance :math:`n` in time slice :math:`t`.
:math:`\\hat{x}^{(n)}_t` - estimated state of appliance :math:`n` in time slice :math:`t`.
Functions
---------
'''
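# A minimal usage sketch (hypothetical names): ``disag`` and ``gt`` stand for
# nilmtk.MeterGroup objects obtained elsewhere, e.g. ``building.elec`` of a
# disaggregation-output DataSet and of the ground-truth DataSet respectively.
#
#     >>> from nilmtk.metrics import f1_score, error_in_assigned_energy
#     >>> f1 = f1_score(disag, gt)                       # pd.Series, one value per meter
#     >>> errors = error_in_assigned_energy(disag, gt)   # absolute errors, in kWh
#     >>> print(f1.mean(), errors.sum())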
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups
from .electric import align_two_meters
def error_in_assigned_energy(predictions, ground_truth):
"""Compute error in assigned energy.
.. math::
error^{(n)} =
\\left | \\sum_t y^{(n)}_t - \\sum_t \\hat{y}^{(n)}_t \\right |
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
errors : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the absolute error in assigned energy for that appliance,
in kWh.
"""
errors = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
sections = pred_meter.good_sections()
ground_truth_energy = ground_truth_meter.total_energy(sections=sections)
predicted_energy = pred_meter.total_energy(sections=sections)
errors[pred_meter.instance()] = np.abs(ground_truth_energy - predicted_energy)
return pd.Series(errors)
def fraction_energy_assigned_correctly(predictions, ground_truth):
'''Compute fraction of energy assigned correctly
.. math::
fraction =
\\sum_n min \\left (
        \\frac{\\sum_t y^{(n)}_t}{\\sum_{n,t} y^{(n)}_t},
        \\frac{\\sum_t \\hat{y}^{(n)}_t}{\\sum_{n,t} \\hat{y}^{(n)}_t}
\\right )
    Ignores the distinction between different AC types; if there are multiple
    AC types for a meter then we just take the max value across the AC types.
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
fraction : float in the range [0,1]
Fraction of Energy Correctly Assigned.
'''
predictions_submeters = MeterGroup(meters=predictions.submeters().meters)
ground_truth_submeters = MeterGroup(meters=ground_truth.submeters().meters)
fraction_per_meter_predictions = predictions_submeters.fraction_per_meter()
fraction_per_meter_ground_truth = ground_truth_submeters.fraction_per_meter()
fraction_per_meter_ground_truth.index = fraction_per_meter_ground_truth.index.map(lambda meter: meter.instance)
fraction_per_meter_predictions.index = fraction_per_meter_predictions.index.map(lambda meter: meter.instance)
fraction = 0
for meter_instance in predictions_submeters.instance():
fraction += min(fraction_per_meter_ground_truth[meter_instance],
fraction_per_meter_predictions[meter_instance])
return fraction
def mean_normalized_error_power(predictions, ground_truth):
'''Compute mean normalized error in assigned power
.. math::
error^{(n)} =
\\frac
{ \\sum_t {\\left | y_t^{(n)} - \\hat{y}_t^{(n)} \\right |} }
{ \\sum_t y_t^{(n)} }
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
mne : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the MNE for that appliance.
'''
mne = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
total_abs_diff = 0.0
sum_of_ground_truth_power = 0.0
for aligned_meters_chunk in align_two_meters(pred_meter,
ground_truth_meter):
diff = aligned_meters_chunk.icol(0) - aligned_meters_chunk.icol(1)
total_abs_diff += sum(abs(diff.dropna()))
sum_of_ground_truth_power += aligned_meters_chunk.icol(1).sum()
mne[pred_meter.instance()] = total_abs_diff / sum_of_ground_truth_power
return pd.Series(mne)
def rms_error_power(predictions, ground_truth):
'''Compute RMS error in assigned power
.. math::
error^{(n)} = \\sqrt{ \\frac{1}{T} \\sum_t{ \\left ( y_t - \\hat{y}_t \\right )^2 } }
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
error : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the RMS error in predicted power for that appliance.
'''
error = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
sum_of_squared_diff = 0.0
n_samples = 0
for aligned_meters_chunk in align_two_meters(pred_meter,
ground_truth_meter):
diff = aligned_meters_chunk.icol(0) - aligned_meters_chunk.icol(1)
diff.dropna(inplace=True)
sum_of_squared_diff += (diff ** 2).sum()
n_samples += len(diff)
error[pred_meter.instance()] = math.sqrt(sum_of_squared_diff / n_samples)
return pd.Series(error)
def f1_score(predictions, ground_truth):
'''Compute F1 scores.
.. math::
F_{score}^{(n)} = \\frac
{2 * Precision * Recall}
{Precision + Recall}
Parameters
----------
predictions, ground_truth : nilmtk.MeterGroup
Returns
-------
f1_scores : pd.Series
Each index is an meter instance int (or tuple for MeterGroups).
Each value is the F1 score for that appliance. If there are multiple
chunks then the value is the weighted mean of the F1 score for
each chunk.
'''
# If we import sklearn at top of file then sphinx breaks.
from sklearn.metrics import f1_score as sklearn_f1_score
    # sklearn produces lots of DeprecationWarnings with PyTables
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
f1_scores = {}
both_sets_of_meters = iterate_through_submeters_of_two_metergroups(
predictions, ground_truth)
for pred_meter, ground_truth_meter in both_sets_of_meters:
scores_for_meter = pd.DataFrame(columns=['score', 'n_samples'])
for aligned_states_chunk in align_two_meters(pred_meter,
ground_truth_meter,
'when_on'):
aligned_states_chunk.dropna(inplace=True)
aligned_states_chunk = aligned_states_chunk.astype(int)
score = sklearn_f1_score(aligned_states_chunk.icol(0),
aligned_states_chunk.icol(1))
scores_for_meter = scores_for_meter.append(
{'score': score, 'n_samples': len(aligned_states_chunk)},
ignore_index=True)
# Calculate weighted mean
tot_samples = scores_for_meter['n_samples'].sum()
scores_for_meter['proportion'] = (scores_for_meter['n_samples'] /
tot_samples)
avg_score = (scores_for_meter['score'] *
scores_for_meter['proportion']).sum()
f1_scores[pred_meter.instance()] = avg_score
return pd.Series(f1_scores)
##### FUNCTIONS BELOW THIS LINE HAVE NOT YET BEEN CONVERTED TO NILMTK v0.2 #####
"""
def confusion_matrices(predicted_states, ground_truth_states):
'''Compute confusion matrix between appliance states for each appliance
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
dict of type {appliance : confusion matrix}
'''
re = {}
for appliance in predicted_states:
matrix = np.zeros([np.max(ground_truth_states[appliance]) + 1,
np.max(ground_truth_states[appliance]) + 1])
for time in predicted_states[appliance]:
matrix[predicted_states.values[time, appliance],
ground_truth_states.values[time, appliance]] += 1
re[appliance] = matrix
return re
def tp_fp_fn_tn(predicted_states, ground_truth_states):
'''Compute counts of True Positives, False Positives, False Negatives, True Negatives
.. math::
TP^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = on \\right )
FP^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = on \\right )
FN^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = on, \\hat{x}^{(n)}_t = off \\right )
TN^{(n)} =
\\sum_{t}
and \\left ( x^{(n)}_t = off, \\hat{x}^{(n)}_t = off \\right )
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [TP, FP, FN, TN]
'''
# assumes state 0 = off, all other states = on
predicted_states_on = predicted_states > 0
ground_truth_states_on = ground_truth_states > 0
tp = np.sum(np.logical_and(predicted_states_on.values == True,
ground_truth_states_on.values == True), axis=0)
fp = np.sum(np.logical_and(predicted_states_on.values == True,
ground_truth_states_on.values == False), axis=0)
fn = np.sum(np.logical_and(predicted_states_on.values == False,
ground_truth_states_on.values == True), axis=0)
tn = np.sum(np.logical_and(predicted_states_on.values == False,
ground_truth_states_on.values == False), axis=0)
return np.array([tp, fp, fn, tn]).astype(float)
def tpr_fpr(predicted_states, ground_truth_states):
'''Compute True Positive Rate and False Negative Rate
.. math::
TPR^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )}
FPR^{(n)} = \\frac{FP}{\\left ( FP + TN \\right )}
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [TPR, FPR]
'''
tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states)
tpr = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :])
fpr = tfpn[1, :] / (tfpn[1, :] + tfpn[3, :])
return np.array([tpr, fpr])
def precision_recall(predicted_states, ground_truth_states):
'''Compute Precision and Recall
.. math::
Precision^{(n)} = \\frac{TP}{\\left ( TP + FP \\right )}
Recall^{(n)} = \\frac{TP}{\\left ( TP + FN \\right )}
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
numpy array where columns represent appliances and rows represent: [Precision, Recall]
'''
tfpn = tp_fp_fn_tn(predicted_states, ground_truth_states)
prec = tfpn[0, :] / (tfpn[0, :] + tfpn[1, :])
rec = tfpn[0, :] / (tfpn[0, :] + tfpn[2, :])
return np.array([prec, rec])
def hamming_loss(predicted_state, ground_truth_state):
'''Compute Hamming loss
.. math::
HammingLoss =
\\frac{1}{T} \\sum_{t}
\\frac{1}{N} \\sum_{n}
xor \\left ( x^{(n)}_t, \\hat{x}^{(n)}_t \\right )
Parameters
----------
predicted_state: Pandas DataFrame of type {appliance :
[array of predicted states]}
ground_truth_state: Pandas DataFrame of type {appliance :
[array of ground truth states]}
Returns
-------
float of hamming_loss
'''
num_appliances = np.size(ground_truth_state.values, axis=1)
xors = np.sum((predicted_state.values != ground_truth_state.values),
axis=1) / num_appliances
return np.mean(xors)
"""
| apache-2.0 |
mapazarr/astropy_scripts | astropy_scripts/test_histograms.py | 1 | 11331 | from __future__ import (absolute_import, division, print_function,
unicode_literals) # python 2 as python 3
import numpy as np
from astropy.table import Table
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LogNorm
from pylab import *
import matplotlib.mlab as mlab
GRAPH_DEBUG = 0
#################
# histogramming #
#################
data = np.array([1.,2.,3.,4.,5.])
print("data", repr(data))
bins = np.array([0., 2.5, 5.])
print("bins", repr(bins))
dig_data = np.digitize(data, bins)
h_data, edges_data = np.histogram(data, bins)
data_x = np.array([1.,2.,3.,4.,5.]) # column-wise # works for histogram2d
data_y = np.array([11.,12.,13.,14.,15.]) # works for histogram2d
print("data_x", repr(data_x))
print("data_y", repr(data_y))
data_2d = np.array([[1.,2.,3.,4.,5.], [11.,12.,13.,14.,15.]]) # column-wise # doesn't work!
data_2d_col = np.array([[1.,2.,3.,4.,5.], [11.,12.,13.,14.,15.]]) # column-wise # doesn't work!
data_2d_row = np.array([[1.,11.], [2.,12.], [3.,13.], [4.,14.], [5.,15.]]) # row-wise # works for histogramdd
print("data_2d", repr(data_2d))
#bins_2d = np.array([[0., 2.5, 5.], [10., 12.5, 15.]])
bins_2d = np.array([[0., 2., 4., 6.], [10., 12., 14., 16.]])
print("bins_2d", repr(bins_2d))
#np.digitize(data_2d, bins_2d) # doesn't work!
#np.histogram(data_2d, bins_2d) # doesn't work!
#np.histogram2d(data_2d, bins_2d) # doesn't work!
h2d_data, x_edges_data, y_edges_data = np.histogram2d(data_x, data_y, bins_2d)
hdd_data, edges_data = np.histogramdd(data_2d_row, bins_2d)
# convert column-wise data to row-wise data
data_2d_col_trans = np.transpose(data_2d_col)
# test histogram from astropy table
astro_table = Table()
astro_table['X'] = data_x
astro_table['Y'] = data_y
print("astro_table")
print(astro_table)
data = np.array(astro_table) # structured array incompatible with histogram functions!
print("structured data", data)
data = np.vstack([astro_table['X'], astro_table['Y']]).T
print("non-structured data")
print(data)
hdd_table, edges_table = np.histogramdd(data, bins_2d)
#import IPython; IPython.embed()
#dig_data
#h_data
#h2d_data
#hdd_data
#hdd_table
print("dig_data")
print(repr(dig_data))
print("h_data")
print(repr(h_data))
print("h2d_data")
print(repr(h2d_data))
print("hdd_data")
print(repr(hdd_data))
print("hdd_table")
print(repr(hdd_table))
# there are also methods to histogram directly when plotting with mpl (plt.hist)
# ref: http://matplotlib.org/examples/statistics/histogram_demo_features.html
print()
############
# plotting #
############
# Examples of 1D histogram plots
# ref: http://matplotlib.org/examples/statistics/histogram_demo_features.html
# http://matplotlib.org/examples/statistics/histogram_demo_histtypes.html
# 1st example
fig = plt.figure()
ax = fig.add_subplot(111)
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(10000)
num_bins = 50
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# 2nd example
mu = 200
sigma = 25
x = mu + sigma*np.random.randn(10000)
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(8, 4))
ax0.hist(x, 20, normed=1, histtype='stepfilled', facecolor='g', alpha=0.75)
ax0.set_title('stepfilled')
# Create a histogram by providing the bin edges (unequally spaced).
bins = [100, 150, 180, 195, 205, 220, 250, 300]
ax1.hist(x, bins, normed=1, histtype='bar', rwidth=0.8)
ax1.set_title('unequal bins')
plt.tight_layout()
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# TODO: try to make plots with my numpy 1D histogram objects: is it possible, or do I have to use the "hist" method of matplotlib/pyplot?
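# A minimal sketch answering the TODO above: a histogram pre-computed with
# np.histogram can be plotted without plt.hist, e.g. via plt.bar on the bin
# centers (the sample data below is synthetic, for illustration only).
fig = plt.figure()
ax = fig.add_subplot(111)
counts, edges = np.histogram(mu + sigma * np.random.randn(1000), bins=20)
centers = 0.5 * (edges[:-1] + edges[1:])
ax.bar(centers, counts, width=np.diff(edges), color='g', alpha=0.5)
ax.set_xlabel('value')
ax.set_ylabel('counts')
ax.set_title('plt.bar from np.histogram output')
plt.draw()
if GRAPH_DEBUG:
    plt.show() # wait until image is closed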
# plot 2D histogram in 3D (with bars)
# ref: http://matplotlib.org/examples/mplot3d/hist3d_demo.html
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x, y = np.random.rand(2, 100) * 4
hist, xedges, yedges = np.histogram2d(x, y, bins=4)
elements = (len(xedges) - 1) * (len(yedges) - 1)
xpos, ypos = np.meshgrid(xedges[:-1]+0.25, yedges[:-1]+0.25)
xpos = xpos.flatten()
ypos = ypos.flatten()
zpos = np.zeros(elements)
dx = 0.5 * np.ones_like(zpos)
dy = dx.copy()
dz = hist.flatten()
ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b', zsort='average')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('HIST quantity')
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# plot 2D histogram in 2D (with squares)
# not working!!! I guess I need to do a scatter plot!!!
#fig = plt.figure()
###ax = fig.add_subplot(111, projection='2d')
#ax = fig.add_subplot(111)
#x, y = np.random.rand(2, 100) * 4
#hist, xedges, yedges = np.histogram2d(x, y, bins=4)
#
##elements = (len(xedges) - 1) * (len(yedges) - 1)
#xpos, ypos = np.meshgrid(xedges[:-1]+0.25, yedges[:-1]+0.25)
#
#xpos = xpos.flatten()
#ypos = ypos.flatten()
#dx = hist.flatten()
#dy = dx.copy()
#
##ax.bar2d(xpos, ypos, dx, dy, color='b', zsort='average')
##ax.bar(xpos, ypos, dx, dy, color='b', zsort='average')
##ax.imshow([dx,dy], origin='lower')
#ax.set_xlabel('X')
#ax.set_ylabel('Y')
#
#plt.draw()
#
#if GRAPH_DEBUG:
# plt.show() # wait until image is closed
# plot 2D histogram in 2D (scatter)
#(see a better example further below)
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 100) * 4
hist, xedges, yedges = np.histogram2d(x, y, bins=4)
#elements = (len(xedges) - 1) * (len(yedges) - 1)
xpos, ypos = np.meshgrid(xedges[:-1]+0.25, yedges[:-1]+0.25)
xpos = xpos.flatten()
ypos = ypos.flatten()
dx = hist.flatten()
dy = dx.copy()
area = dx*dy
c = plt.scatter(xpos, ypos, c='b', s=area, cmap=plt.cm.hsv)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# plot 2D histogram in 2D (with colors, similar colz in root)
# ref: http://matplotlib.org/examples/pylab_examples/hist2d_log_demo.html
# not exportable to 3D, because there is no hist3d!!!
fig = plt.figure()
ax = fig.add_subplot(111)
# normal distribution centered at x=0 and y=5
x = randn(100000)
y = randn(100000)+5
plt.hist2d(x, y, bins=40, norm=LogNorm())
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.colorbar(label='represented quantity')
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# For plotting a 3D hist as a box (cube) plot use:
#  x, y, z as the middle points of the bins defined by the edges (one point fewer than bin edges), i.e. the bin centers
#  data: cubes with side equal to the value of the histogram in the corresponding bin,
#        optionally scaled by a constant factor for better plot visibility
# plot 3D histogram in 3D (with boxes)
data_z = np.array([21.,22.,23.,24.,25.]) # works for histogram2d
bins_3d = np.array([[0., 2., 4., 6.], [10., 12., 14., 16.], [20., 22., 24., 26.]])
astro_table['Z'] = data_z
print("astro_table")
print(astro_table)
print("structured data", data)
data = np.vstack([astro_table['X'], astro_table['Y'], astro_table['Z']]).T
print("non-structured data")
print(data)
hdd_table, edges_table = np.histogramdd(data, bins_3d)
print("edges_table")
print(repr(edges_table))
print("hdd_table")
print(repr(hdd_table))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# define point positions as bin centers
x_edges_low = edges_table[0][:-1]
x_edges_high = edges_table[0][1:]
xpos = (x_edges_low + x_edges_high)/2.
y_edges_low = edges_table[1][:-1]
y_edges_high = edges_table[1][1:]
ypos = (y_edges_low + y_edges_high)/2.
z_edges_low = edges_table[2][:-1]
z_edges_high = edges_table[2][1:]
zpos = (z_edges_low + z_edges_high)/2.
# define grid of points (i.e. coords for each 2D bin)
xpos, ypos, zpos = np.meshgrid(xpos, ypos, zpos)
xpos = xpos.flatten() # reduce to a list of coords
ypos = ypos.flatten() # reduce to a list of coords
zpos = zpos.flatten() # reduce to a list of coords
# dimensions of the boxes
dx = hdd_table.flatten()
#dx *= 0.5 # scale! # WARNING! if I scale, the boxes are shifted w.r.t. the bin centers!!! TODO: FIXME!!!!
dy = dx.copy()
dz = dx.copy()
ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='b', zsort='average')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# For plotting a 2D hist as a scatter plot use:
#  x, y as the middle points of the bins defined by the edges (one point fewer than bin edges), i.e. the bin centers
#  data: circles with radius equal to the value of the histogram in the corresponding bin,
#        optionally scaled by a constant factor for better plot visibility
# The procedure for a 3D hist scatter plot is analogous.
# plot 2D histogram in 2D (scatter)
# ref: http://matplotlib.org/examples/pie_and_polar_charts/polar_scatter_demo.html
fig = plt.figure()
ax = fig.add_subplot(111)
# define point positions as bin centers
x_edges_low = x_edges_data[:-1]
x_edges_high = x_edges_data[1:]
xpos = (x_edges_low + x_edges_high)/2.
y_edges_low = y_edges_data[:-1]
y_edges_high = y_edges_data[1:]
ypos = (y_edges_low + y_edges_high)/2.
# define grid of points (i.e. coords for each 2D bin)
xpos, ypos = np.meshgrid(xpos, ypos)
xpos = xpos.flatten() # reduce to a list of coords
ypos = ypos.flatten() # reduce to a list of coords
# area for the circles in the scatter plot
r = h2d_data.flatten() # reduce to a list of bin contents
area = r**2
area *= 100. # scale!
colormap = plt.cm.hsv
# ref: http://matplotlib.org/examples/color/colormaps_reference.html
# beware: scatter is called in "ax" for 3D plots, but in "plt" for 2D plots!
c = plt.scatter(xpos, ypos, c='b', s=area, cmap=colormap)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# plot 3D histogram in 3D (scatter)
# ref: http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# define point positions as bin centers
x_edges_low = edges_table[0][:-1]
x_edges_high = edges_table[0][1:]
xpos = (x_edges_low + x_edges_high)/2.
y_edges_low = edges_table[1][:-1]
y_edges_high = edges_table[1][1:]
ypos = (y_edges_low + y_edges_high)/2.
z_edges_low = edges_table[2][:-1]
z_edges_high = edges_table[2][1:]
zpos = (z_edges_low + z_edges_high)/2.
# define grid of points (i.e. coords for each 2D bin)
xpos, ypos, zpos = np.meshgrid(xpos, ypos, zpos)
xpos = xpos.flatten() # reduce to a list of coords
ypos = ypos.flatten() # reduce to a list of coords
zpos = zpos.flatten() # reduce to a list of coords
# area for the circles in the scatter plot
r = hdd_table.flatten() # reduce to a list of bin contents
area = r**2
area *= 100. # scale!
colormap = plt.cm.hsv
# ref: http://matplotlib.org/examples/color/colormaps_reference.html
# beware: scatter is called in "ax" for 3D plots, but in "plt" for 2D plots!
c = ax.scatter(xpos, ypos, zpos, c='b', s=area, cmap=colormap)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
plt.show() #don't quit at the end
| gpl-2.0 |
peter-kiechle/tactile-sensors | python/PyDSA-Konqueror/PyDSA-Konqueror.py | 1 | 7924 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
import matplotlib.gridspec as gridspec
import colorsys
import pygtk
pygtk.require('2.0')
import gtk
# Custom library
print("CWD: " + os.getcwd() )
lib_path = os.path.abspath('../../lib')
sys.path.append(lib_path)
import framemanager_python
##############
# Color stuff
##############
UIBK_blue = [0.0, 0.1765, 0.4392]
UIBK_orange = [1.0, 0.5, 0.0]
def change_brightness(colors, factor):
brighter_colors = []
for color in colors:
color = colorsys.rgb_to_hls(color[0], color[1], color[2])
# workaround immutable tuples
color = list(color)
color[1] = np.min([1.0, factor*color[1]])
color = tuple(color)
brighter_colors.append( colorsys.hls_to_rgb(color[0], color[1], color[2]) )
return brighter_colors
# Define YELLOW_RED colormap:
# For each RGB channel: each row consists of (x, y0, y1) where the x must increase from 0 to 1
#row i: x y0 y1
# /
# /
#row i+1: x y0 y1
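# Between two consecutive rows the channel value is interpolated from y1 of the
# lower row to y0 of the upper row; y0 != y1 on a single row gives a discontinuity.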
cdict = {'red': ((0.0, 0.9, 0.9), # Red channel remains constant
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.9, 0.9), # Green fades out
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0), # Blue is turned off
(1.0, 0.0, 0.0))}
plt.register_cmap(name='YELLOW_RED', data=cdict)
colormap = plt.get_cmap('YELLOW_RED')
#colormap = plt.get_cmap('gnuplot')
#colormap = plt.get_cmap('YlOrRd')
#colormap = plt.get_cmap('autumn')
#colormap = plt.get_cmap('afmhot')
#colormap = plt.get_cmap('gist_heat')
#colormap = plt.get_cmap('gray')
# Color of inactive cells
#colormap.set_under([0.0, 0.0, 0.0])
colormap.set_under([0.2, 0.2, 0.2])
# Categorical colors
colors = [(0.12156862745098039, 0.4666666666666667, 0.7058823529411765), # Blue
(1.0, 0.4980392156862745, 0.054901960784313725), # Orange
(0.17254901960784313, 0.6274509803921569, 0.17254901960784313), # Green
(0.8392156862745098, 0.15294117647058825, 0.1568627450980392), # Red
(0.5803921568627451, 0.403921568627451, 0.7411764705882353), # Purple
(0.5490196078431373, 0.33725490196078434, 0.29411764705882354), # Brown
(0.7372549019607844, 0.7411764705882353, 0.13333333333333333), # Greenish/yellow
(0.09019607843137255, 0.7450980392156863, 0.8117647058823529)] # Aquamarine
brighter_colors = change_brightness(colors, 1.5)
##############
# Filechooser
##############
def pick_file():
filename = None
    # Check for new pygtk: this is a new class in PyGtk 2.4
if gtk.pygtk_version < (2,3,90):
print "PyGtk 2.3.90 or later required for this example"
raise SystemExit
dialog = gtk.FileChooserDialog("Open..",
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_current_folder(os.getcwd())
filter = gtk.FileFilter()
filter.set_name("DSA Pressure Profiles")
filter.add_pattern("*.dsa")
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
#elif response == gtk.RESPONSE_CANCEL:
# sys.exit()
dialog.destroy()
return filename
###########################
# Called when slider moves
###########################
def update_frame(val):
frameID = int(slider_frameID.val)
for matrixID, name in enumerate(matrix_description):
frame = frameManager.get_tsframe(frameID, matrixID)
ax = axismapping[matrixID]
ax.cla()
ax.imshow(frame, cmap=colormap, vmin=0.001, vmax=maxValue, interpolation='nearest')
ax.text(0.5, 0.5, "%d" % matrixID, va="center", ha="center", color=[1.0, 1.0, 1.0, 0.5], fontsize=32, transform=ax.transAxes)
# Remove axis labels
for tl in ax.get_xticklabels() + ax.get_yticklabels():
tl.set_visible(False)
marker = ax_graph.axvline(x=timestamps[frameID], ymin=0.0, ymax = 1.0, lw=2, ls='--', color=[1.0, 1.0, 1.0], alpha=0.5)
plt.draw()
marker.remove()
# Load pressure profile
#file_name = "foam_ball.dsa"
if os.environ.get('DSA_PROFILE_NAME'):
file_name = os.environ['DSA_PROFILE_NAME']
else:
file_name = pick_file()
if file_name == None:
sys.exit()
print "Opening file: ", file_name
profileAbsPath = os.path.abspath(file_name)
profileName = os.path.basename(profileAbsPath)
frameManager = framemanager_python.FrameManagerWrapper()
frameManager.load_profile(profileAbsPath);
numFrames = frameManager.get_tsframe_count();
maxValue = np.max(frameManager.get_max_frame_list())
# Matrix averages
averages_matrix_0 = frameManager.get_average_matrix_list(0)
averages_matrix_1 = frameManager.get_average_matrix_list(1)
averages_matrix_2 = frameManager.get_average_matrix_list(2)
averages_matrix_3 = frameManager.get_average_matrix_list(3)
averages_matrix_4 = frameManager.get_average_matrix_list(4)
averages_matrix_5 = frameManager.get_average_matrix_list(5)
# Time stamps
timestamps = frameManager.get_tsframe_timestamp_list()
timestamps = (timestamps-timestamps[0]) / 1000.0 # Relative timestamps in seconds
############
# Plotting
############
# Grid-coordinates: (y, x)
gs = gridspec.GridSpec(1, 2, wspace=0.25, width_ratios=[1,1])
gs_left = gridspec.GridSpecFromSubplotSpec(2, 3, subplot_spec=gs[0])
gs_right = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[1])
ax_finger_0_dist = plt.subplot(gs_left[0,0])
ax_finger_0_prox = plt.subplot(gs_left[1,0])
ax_finger_1_dist = plt.subplot(gs_left[0,1])
ax_finger_1_prox = plt.subplot(gs_left[1,1])
ax_finger_2_dist = plt.subplot(gs_left[0,2])
ax_finger_2_prox = plt.subplot(gs_left[1,2])
ax_graph = plt.subplot(gs_right[0])
axismapping = [ax_finger_0_prox, ax_finger_0_dist,
ax_finger_1_prox, ax_finger_1_dist,
ax_finger_2_prox, ax_finger_2_dist, ax_graph]
matrix_description = ["Finger 0: Proximal", "Finger 0: Distal",
"Finger 1: Proximal", "Finger 1: Distal",
"Finger 2: Proximal", "Finger 2: Distal"]
axismapping[0].set_xlabel("Finger 0")
axismapping[2].set_xlabel("Finger 1")
axismapping[4].set_xlabel("Finger 2")
axismapping[0].set_ylabel("Proximal")
axismapping[1].set_ylabel("Distal")
# Plot matrix averages
ax_graph.plot(timestamps, averages_matrix_0, lw=1, label="Average Matrix 0", color=colors[0])
ax_graph.plot(timestamps, averages_matrix_1, lw=1, label="Average Matrix 1", color=brighter_colors[0])
ax_graph.plot(timestamps, averages_matrix_2, lw=1, label="Average Matrix 2", color=colors[2])
ax_graph.plot(timestamps, averages_matrix_3, lw=1, label="Average Matrix 3", color=brighter_colors[2])
ax_graph.plot(timestamps, averages_matrix_4, lw=1, label="Average Matrix 4", color=colors[3])
ax_graph.plot(timestamps, averages_matrix_5, lw=1, label="Average Matrix 5", color=brighter_colors[3])
ax_graph.set_axis_bgcolor([0.2, 0.2, 0.2])
#ax_graph.legend()
#ax_graph.legend(loc = 'upper left')
ax_graph.set_xlabel("Time [s]")
ax_graph.set_ylabel("Matrix Average", rotation=90)
plt.subplots_adjust(top=0.90, left = 0.05, bottom=0.15, right = 0.95)
ax_slider = plt.axes([0.25, 0.02, 0.6, 0.03]) # left, bottom, width, height
slider_frameID = Slider(ax_slider, 'Frame ID', 0, numFrames-1, valfmt='%0.0f', valinit=0)
slider_frameID.on_changed(update_frame)
update_frame(0)
plt.suptitle("Profile: "+profileName, fontsize=16)
plt.show()
| gpl-3.0 |
sgenoud/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 8 | 1200 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print __doc__
import numpy as np
import pylab as pl
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
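# level 0.0 is the separating hyperplane; the -1.0/+1.0 levels trace the
# margins, i.e. the contours where the decision function equals -1 and +1.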
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
pl.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis('tight')
pl.show()
| bsd-3-clause |
dyno/LMK | lmk/market/Market.py | 1 | 3765 | from time import strftime
from os.path import join, exists
from datetime import date, datetime, timedelta
from collections import namedtuple
from pandas import Series, Timestamp, to_datetime
from ..utils import env
from ..cache import Cache
TradeHour = namedtuple("TradeHour", ["open", "close"])
TradeTime = namedtuple("TradeTime", ["hour", "minute"])
class Market:
HISTORY_COLUMNS = ["Open", "High", "Low", "Close", "Volume", "Adj Close"]
tz = None
def __init__(self):
self._now = None
self.cache = Cache()
def _trading_day(self, dt=None):
if not dt:
dt = self.now.date()
# neither weekends nor holidays
return dt.weekday() not in (5, 6) and dt not in self.holidays
@property
def now(self):
return self._now if self._now else datetime.now(tz=self.tz)
@now.setter
def now(self, dt=None):
self._now = to_datetime(dt) if dt else None
if self._now:
            self._now = self._now.replace(tzinfo=self.tz)
@property
def today(self):
return self.now.date()
@property
def _today(self):
return self.today.strftime("%Y-%m-%d")
@property
def last_trading_day(self):
if self.now.hour < self.trading_hour.open.hour:
dt = self.today - timedelta(1)
else:
dt = self.today
while not self._trading_day(dt):
dt -= timedelta(1)
return dt
@property
def open(self):
hour, minute = self.now.hour, self.now.minute
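        # TradeTime is a namedtuple, so the chained comparison below compares
        # (hour, minute) tuples lexicographically against the trading window.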
return self._trading_day() and self.trading_hour.open <= (hour, minute) <= self.trading_hour.close
@property
def closed(self):
return not self.open
def retrieve_history(self, symbol, _start, _end=env._today, normalize=True, patch_today=True):
start, end = to_datetime(_start).date(), to_datetime(_end).date()
h = self.cache.get(symbol, start, end)
if h is None:
do_patch_today = False
if end == self.today and self._trading_day():
do_patch_today = patch_today
# XXX: No today's data before market close. Even after the market close,
# the data might take some time to appear. We should not expect cache to have it.
end = self.today - timedelta(1)
h = self.cache.get(symbol, start, end)
if h is None:
h = self.datasource.retrieve_history(symbol, _start, _end)
if self.today in h.index:
end = self.today
self.cache.put(symbol, start, end, h)
if do_patch_today and self.today not in h.index:
r = self.datasource.get_quote_today(symbol)
if r:
h.loc[Timestamp(self._today)] = Series(r)
assert h is not None
if normalize:
# http://luminouslogic.com/how-to-normalize-historical-data-for-splits-dividends-etc.htm
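            # i.e. each raw price is rescaled by that bar's adjustment factor
            # Adj Close / Close; for illustration (made-up numbers), Close=100,
            # Adj Close=50 and Open=98 give a normalized Open of 49.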
h["_Open"] = h["Open"] * h["Adj Close"] / h["Close"]
h["_High"] = h["High"] * h["Adj Close"] / h["Close"]
h["_Low"] = h["Low"] * h["Adj Close"] / h["Close"]
h["_Close"] = h["Adj Close"]
del h["Open"], h["High"], h["Low"], h["Close"], h["Adj Close"]
h.rename(columns=lambda c: c.replace("_", ""), inplace=True)
return h
def get_symbol_name(self, symbol):
if symbol in self.cache.name.index:
name = self.cache.name.loc[symbol]
else:
name = self.datasource.get_symbol_name(symbol)
self.cache.name.loc[symbol] = name
self.cache.flush_name()
return name
def set_datasource(self, ds):
self.datasource = self.datasources[ds]
| mit |
trungnt13/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components between calls to (partial_)fit raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
RexFuzzle/sfepy | script/plot_times.py | 1 | 1707 | #!/usr/bin/env python
"""
Plot time steps, times of time steps and time deltas in an HDF5 results file.
"""
import sys
sys.path.append('.')
from optparse import OptionParser
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.postprocess.time_history import extract_times
usage = '%prog [options] filename\n' + __doc__.rstrip()
helps = {
'logarithmic' :
'plot time steps in logarithmic scale',
}
def main():
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-l', '--logarithmic',
action='store_true', dest='logarithmic',
default=False, help=helps['logarithmic'])
options, args = parser.parse_args()
if (len(args) == 1):
filename = args[0]
else:
parser.print_help()
return
plt.rcParams['lines.linewidth'] = 3
plt.rcParams['lines.markersize'] = 9
fontsize = 16
steps, times, nts, dts = extract_times(filename)
dts[-1] = nm.nan
ax = plt.subplot(211)
if options.logarithmic:
l1, = ax.semilogy(steps, dts, 'b')
else:
l1, = ax.plot(steps, dts, 'b')
ax.set_xlabel('step', fontsize=fontsize)
ax.set_ylabel(r'$\Delta t$', fontsize=fontsize)
ax.grid(True)
ax = ax.twinx()
l2, = ax.plot(steps, times, 'g')
ax.set_ylabel(r'$t$', fontsize=fontsize)
ax.legend([l1, l2], [r'$\Delta t$', r'$t$'], loc=0)
ax = plt.subplot(212)
if options.logarithmic:
ax.semilogy(times, dts, 'b+')
else:
ax.plot(times, dts, 'b+')
ax.set_xlabel(r'$t$', fontsize=fontsize)
ax.set_ylabel(r'$\Delta t$', fontsize=fontsize)
ax.grid(True)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
saatvikshah1994/SmartMM | Sentiment Analysis/supervised/tf_bow.py | 1 | 1168 | from utilities import load_data,cross_validate
from utilities import DataClean
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
if __name__ == '__main__':
ids,X,y = load_data("stanford")
pipeline = Pipeline([
('cleaner',DataClean(clean_list=[
["[^a-z]"," "], # only letters
[" [ ]+", " "], # remove extra spaces
],html_clean=True)),
('tf',TfidfVectorizer(use_idf=False,stop_words="english")),
('classifier',BernoulliNB())
])
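    # Note: use_idf=False keeps plain (l2-normalised) term frequencies, and
    # BernoulliNB binarises its input (binarize=0.0 by default), so any
    # non-zero frequency is treated simply as presence of the term.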
cross_validate((X,y),pipeline,accuracy_score)
# Cornell
# accuracy_score : 0.561444222777 +/- 0.00476207774317
# Confusion Matrix
# [[ 744. 2936. 2872. 420. 100.]
# [ 967. 6398. 17320. 2216. 372.]
# [ 435. 4617. 68438. 5425. 667.]
# [ 271. 1767. 18586. 10745. 1558.]
# [ 71. 337. 2807. 4697. 1294.]]
# Stanford
# accuracy_score : 0.84216 +/- 0.00601916937791
# Confusion Matrix
# [[ 11085. 1415.]
# [ 2531. 9969.]] | mit |
valandil/msc_thesis | figs/active/sParametersAutocorrelation.py | 1 | 13410 | # -------------------- Information -------------------- #
# Author: Joey Dumont <[email protected]> #
# Date created: May 19th, 2014 #
# Date mod.: May 19th, 2014 #
# Description: We plot the scattering parameters of #
# fibre-antenna design as a function of #
# frequency. #
# We then study the autocorrelation of #
# each signal and analyze their form: #
# - exponential; #
# - Lorentzian, etc. #
# We also compute the Fourier transform #
# of each curve and see what it gives. #
# ----------------------------------------------------- #
# --------------- Modules Importation ----------------- #
# Importing numerical analysis package and
# morgenstemning color map.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import rcParams
from scipy import interpolate
from scipy.optimize import curve_fit
#from scipy.stats import pearsonr
#from numpy import convolve
import scipy.fftpack as fft
import morgenstemning as mrg
ms,msi = mrg.morgenstemning()
# Setting the rc parameters.
rcParams['text.usetex'] = True
rcParams['text.latex.preamble'] = [r"\usepackage[charter]{mathdesign}"]
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 8
rcParams['axes.labelsize'] = 'large'
rcParams['xtick.labelsize'] = 'large'
rcParams['ytick.labelsize'] = 'large'
rcParams['legend.numpoints'] = 3
rcParams['figure.figsize'] = 7,3
# ---------------- Data Importation ------------------ #
# We import the data for each fibre and rescale the
# frequencies to GHz.
# RF10
rf10S22 = np.loadtxt("../xpData/RF10-S22-OneEnd-2.txt", skiprows=1)
rf10S22[:,0] /= 1.0e9
# RF21
rf21S22 = np.loadtxt("../xpData/RF21-S22-OneEnd.txt", skiprows=1)
rf21S22[:,0] /= 1.0e9
# RF27
rf27 = np.loadtxt("../xpData/RF27-Sparam-01-5GHz.txt", skiprows=1)
rf27[:,0] /= 1.0e9
# RF29
rf29 = np.loadtxt("../xpData/RF29-S-param.txt", skiprows=1)
rf29[:,0] /= 1.0e9
# RF33
rf33 = np.loadtxt("../xpData/RF33-S-parameters.txt", skiprows=1)
rf33[:,0] /= 1.0e9
# ------------------ Plotting Data -------------------- #
# -- We plot the S-parameters of each fibre-antenna.
# RF10
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
plt.plot(rf10S22[:,0],rf10S22[:,1], 'r-', lw=1.5)
ax1.set_xlabel("Frequency (GHz)")
ax1.set_ylabel(r"$S_{22}$ (dB)")
ax1.set_ylim((-50,0))
ax1.grid(True)
ax1.text(0.01,0.93, "RF10", transform=ax1.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF10-sParameters.pdf", bbox_inches='tight')
# RF21
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
plt.plot(rf21S22[:,0], rf21S22[:,1], 'r-', lw=1.5)
ax2.set_xlabel("Frequency (GHz)")
ax2.set_ylabel(r"$S_{22}$ (dB)")
ax2.set_ylim((-50,0))
ax2.grid(True)
ax2.text(0.01,0.93, "RF21", transform=ax2.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF21-sParameters.pdf", bbox_inches='tight')
# RF27
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
plt.plot(rf27[:,0], rf27[:,1], 'b-', lw=1.5, label=r"$S_{11}$")
plt.plot(rf27[:,0], rf27[:,2], 'k-', lw=1.5, label=r"$S_{12}$")
plt.plot(rf27[:,0], rf27[:,4], 'r-', lw=1.5, label=r"$S_{22}$")
ax3.set_xlabel("Frequency (GHz)")
ax3.set_ylabel(r"$S$-parameters")
ax3.set_ylim((-50,0))
ax3.grid(True)
ax3.text(0.01,0.93, "RF27", transform=ax3.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
ax3.legend(loc=0)
plt.savefig("RF27-sParameters.pdf", bbox_inches='tight')
# RF29
fig4 = plt.figure()
ax4 = fig4.add_subplot(111)
plt.plot(rf29[:,0], rf29[:,1], 'b-', lw=1.5, label=r"$S_{11}$")
plt.plot(rf29[:,0], rf29[:,2], 'k-', lw=1.5, label=r"$S_{12}$")
plt.plot(rf29[:,0], rf29[:,4], 'r-', lw=1.5, label=r"$S_{22}$")
ax4.set_xlabel("Frequency (GHz)")
ax4.set_ylabel(r"$S$-parameters")
ax4.set_ylim((-50,0))
ax4.grid(True)
ax4.text(0.01,0.93, "RF29", transform=ax4.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
ax4.legend(loc=0)
plt.savefig("RF29-sParameters.pdf", bbox_inches='tight')
# RF33
fig5 = plt.figure()
ax5 = fig5.add_subplot(111)
plt.plot(rf33[:,0], rf33[:,1], 'b-', lw=1.5, label=r"$S_{11}$")
plt.plot(rf33[:,0], rf33[:,2], 'k-', lw=1.5, label=r"$S_{12}$")
plt.plot(rf33[:,0], rf33[:,4], 'r-', lw=1.5, label=r"$S_{22}$")
ax5.set_xlabel("Frequency (GHz)")
ax5.set_ylabel(r"$S$-parameters")
ax5.set_ylim((-50,0))
ax5.grid(True)
ax5.text(0.01,0.93, "RF33", transform=ax5.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
ax5.legend(loc=0)
plt.savefig("RF33-sParameters.pdf", bbox_inches='tight')
# ----------------- Data Manipulation ----------------- #
# We compute the autocorrelation of each data set.      #
# ----------------------------------------------------- #
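# Estimator used below: each trace is centred (mean removed), np.correlate with
# mode "full" gives the raw autocorrelation at all lags, the result is divided
# by the zero-lag value sum(data**2) so that it equals 1 at zero shift, and only
# the non-negative lags (second half of the "full" output) are kept before a
# Lorentzian is fitted over the first half of the delay axis.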
# -- Function definition.
def lorentz(x,a,gamma):
return a*0.5*gamma/(x*x+gamma**2/4.0)
def exponentiel(x, a):
    return np.exp(-a*x)
def gaussian(x,a):
    return np.exp(-a*x*x)
# -- We compute the autocorrelations.
# We compute the vector of delays.
delays = np.zeros((rf10S22.shape[0]))
for i in range(rf10S22.shape[0]):
delays[i] = i*(rf10S22[1,0]-rf10S22[0,0])
halfDelay = len(delays)//2
pInit = [0.5, 0.5]
# RF10
meanRF10 = np.mean(rf10S22[:,1])
dataRF10 = rf10S22[:,1]-meanRF10
normRF10 = np.sum(dataRF10**2)
acorRF10 = np.correlate(dataRF10,dataRF10, "full")/normRF10
acorRF10 = acorRF10[len(acorRF10)//2:]
optLorentzRF10, covLorentzRF10 = curve_fit(lorentz, delays[:halfDelay], acorRF10[:halfDelay], p0=pInit)
fig6 = plt.figure()
ax6 = fig6.add_subplot(111)
plt.plot(delays,acorRF10, 'r')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],optLorentzRF10[0],optLorentzRF10[1]),
color='r', linestyle='--', marker='^',markevery=4, label=r'$S_{22}$: $A=%.3g$, $\Gamma=%.3g$' %(optLorentzRF10[0], optLorentzRF10[1]))
ax6.set_xlabel("Frequency Shift $\Delta f$ (GHz)")
ax6.set_ylabel(r"Autocorrelation of $S_{22}$")
ax6.legend(loc=0)
ax6.text(0.01,0.93, "RF10", transform=ax6.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF10-autoCorrelation.pdf", bbox_inches='tight')
# RF21
delays = np.zeros((rf21S22.shape[0]))
for i in range(rf21S22.shape[0]):
delays[i] = i*(rf21S22[1,0]-rf21S22[0,0])
halfDelay = len(delays)//2
pInit = [0.5, 0.5]
meanRF21 = np.mean(rf21S22[:,1])
dataRF21 = rf21S22[:,1]-meanRF21
normRF21 = np.sum(dataRF21**2)
acorRF21 = np.correlate(dataRF21,dataRF21, "full")/normRF21
acorRF21 = acorRF21[len(acorRF21)//2:]
optLorentzRF21, covLorentzRF21 = curve_fit(lorentz,delays[:halfDelay], acorRF21[:halfDelay], p0=pInit)
fig7 = plt.figure()
ax7 = fig7.add_subplot(111)
plt.plot(delays,acorRF21, 'r')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],optLorentzRF21[0],optLorentzRF21[1]),
color='r', linestyle='--', marker='^',markevery=4, label=r'$S_{22}$: $A=%.3g$, $\Gamma=%.3g$' %(optLorentzRF21[0], optLorentzRF21[1]))
ax7.set_xlabel("Frequency Shift $\Delta f$ (GHz)")
ax7.set_ylabel(r"Autocorrelation of $S_{22}$")
ax7.legend(loc=0)
ax7.text(0.01,0.93, "RF21", transform=ax7.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF21-autoCorrelation.pdf", bbox_inches='tight')
# RF27
delays = np.zeros((rf27.shape[0]))
for i in range(rf27.shape[0]):
delays[i] = i*(rf27[1,0]-rf27[0,0])
halfDelay = len(delays)//2
autoCorrRF27 = np.zeros((len(delays),3))
curveParamRF27 = np.zeros((2,3))
j=0
for i in [1,2,4]:
meanRF27 = np.mean(rf27[:,i])
dataRF27 = rf27[:,i]-meanRF27
normRF27 = np.sum(dataRF27**2)
acorRF27 = np.correlate(dataRF27,dataRF27,"full")/normRF27
acorRF27 = acorRF27[len(acorRF27)//2:]
optLorentzRF27, covLorentzRF27 = curve_fit(lorentz, delays[:halfDelay], acorRF27[:halfDelay], p0=pInit)
autoCorrRF27[:,j] = acorRF27
curveParamRF27[:,j] = optLorentzRF27
j += 1
fig8 = plt.figure()
ax8 = fig8.add_subplot(111)
plt.plot(delays,autoCorrRF27[:,0], 'b')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF27[0,0],curveParamRF27[1,0]),
color='b', linestyle='--', marker='o',markevery=8, label=r'$S_{11}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF27[0,0], curveParamRF27[1,0]))
plt.plot(delays,autoCorrRF27[:,1], 'k')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF27[0,1],curveParamRF27[1,1]),
color='k', linestyle='--', marker='d',markevery=8, label=r'$S_{12}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF27[0,1], curveParamRF27[1,1]))
plt.plot(delays,autoCorrRF27[:,2], 'r')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF27[0,2],curveParamRF27[1,2]),
color='r', linestyle='--', marker='^',markevery=8, label=r'$S_{22}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF27[0,2], curveParamRF27[1,2]))
ax8.set_xlabel("Frequency Shift $\Delta f$ (GHz)")
ax8.set_ylabel(r"Autocorrelation")
ax8.legend(loc=0)
ax8.text(0.01,0.93, "RF27", transform=ax8.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF27-autoCorrelation.pdf", bbox_inches='tight')
# RF29
delays = np.zeros((rf29.shape[0]))
for i in range(rf29.shape[0]):
delays[i] = i*(rf29[1,0]-rf29[0,0])
halfDelay = len(delays)//2
autoCorrRF29 = np.zeros((len(delays),3))
curveParamRF29 = np.zeros((2,3))
j=0
for i in [1,2,4]:
meanRF29 = np.mean(rf29[:,i])
dataRF29 = rf29[:,i]-meanRF29
normRF29 = np.sum(dataRF29**2)
acorRF29 = np.correlate(dataRF29,dataRF29,"full")/normRF29
acorRF29 = acorRF29[len(acorRF29)//2:]
optLorentzRF29, covLorentzRF29 = curve_fit(lorentz, delays[:halfDelay], acorRF29[:halfDelay], p0=pInit)
autoCorrRF29[:,j] = acorRF29
curveParamRF29[:,j] = optLorentzRF29
j += 1
fig9 = plt.figure()
ax9 = fig9.add_subplot(111)
plt.plot(delays,autoCorrRF29[:,0], 'b')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF29[0,0],curveParamRF29[1,0]),
color='b', linestyle='--', marker='o',markevery=8, label=r'$S_{11}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF29[0,0], curveParamRF29[1,0]))
plt.plot(delays,autoCorrRF29[:,1], 'k')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF29[0,1],curveParamRF29[1,1]),
color='k', linestyle='--', marker='d',markevery=8, label=r'$S_{12}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF29[0,1], curveParamRF29[1,1]))
plt.plot(delays,autoCorrRF29[:,2], 'r')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF29[0,2],curveParamRF29[1,2]),
color='r', linestyle='--', marker='^',markevery=8, label=r'$S_{22}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF29[0,2], curveParamRF29[1,2]))
ax9.set_xlabel("Frequency Shift $\Delta f$ (GHz)")
ax9.set_ylabel(r"Autocorrelation")
ax9.legend(loc=0)
ax9.text(0.01,0.93, "RF29", transform=ax9.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF29-autoCorrelation.pdf", bbox_inches='tight')
# RF33
delays = np.zeros((rf33.shape[0]))
for i in range(rf33.shape[0]):
delays[i] = i*(rf33[1,0]-rf33[0,0])
halfDelay = len(delays)//2
autoCorrRF33 = np.zeros((len(delays),3))
curveParamRF33 = np.zeros((2,3))
j=0
for i in [1,2,4]:
meanRF33 = np.mean(rf33[:,i])
dataRF33 = rf33[:,i]-meanRF33
normRF33 = np.sum(dataRF33**2)
acorRF33 = np.correlate(dataRF33,dataRF33,"full")/normRF33
acorRF33 = acorRF33[len(acorRF33)//2:]
optLorentzRF33, covLorentzRF33 = curve_fit(lorentz, delays[:halfDelay], acorRF33[:halfDelay], p0=pInit)
autoCorrRF33[:,j] = acorRF33
curveParamRF33[:,j] = optLorentzRF33
j += 1
fig10 = plt.figure()
ax10 = fig10.add_subplot(111)
plt.plot(delays,autoCorrRF33[:,0], 'b')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF33[0,0],curveParamRF33[1,0]),
color='b', linestyle='--', marker='o',markevery=8, label=r'$S_{11}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF33[0,0], curveParamRF33[1,0]))
plt.plot(delays,autoCorrRF33[:,1], 'k')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF33[0,1],curveParamRF33[1,1]),
color='k', linestyle='--', marker='d',markevery=8, label=r'$S_{12}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF33[0,1], curveParamRF33[1,1]))
plt.plot(delays,autoCorrRF33[:,2], 'r')
plt.plot(delays[:halfDelay],lorentz(delays[:halfDelay],curveParamRF33[0,2],curveParamRF33[1,2]),
color='r', linestyle='--', marker='^',markevery=8, label=r'$S_{22}$: $A=%.3g$, $\Gamma=%.3g$' %(curveParamRF33[0,2], curveParamRF33[1,2]))
ax10.set_xlabel("Frequency Shift $\Delta f$ (GHz)")
ax10.set_ylabel(r"Autocorrelation")
ax10.legend(loc=0)
ax10.text(0.01,0.93, "RF33", transform=ax10.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
plt.savefig("RF33-autoCorrelation.pdf", bbox_inches='tight')
# ----------------- Data Manipulation ----------------- #
# We compute the Fourier transform of the data. #
# ----------------------------------------------------- #
fig11 = plt.figure()
ax11 = fig11.add_subplot(111)
plotArgs = [dict(linestyle='-', color='b', label=r"$S_{11}$"),
dict(linestyle='-', color='k', label=r"$S_{12}$"),
dict(linestyle='-', color='r', label=r"$S_{22}$")]
j=0
for i in [1,2,4]:
fftRF33 = fft.fftshift(fft.fft(rf33[:,i]))
fftRF33 /= len(fftRF33)
freqRF33 = fft.fftshift(fft.fftfreq(rf33.shape[0],d=rf33[1,0]-rf33[0,0]))
plt.plot(freqRF33,abs(fftRF33), **plotArgs[j])
j += 1
ax11.set_xlabel("FFT Frequency")
ax11.set_ylabel("Intensity of the FFT Spectrum")
ax11.set_xlim((0,20))
ax11.set_yscale('log')
ax11.legend(loc=0)
ax11.text(0.01,0.93, "RF33", transform=ax11.transAxes, fontsize=10, bbox=dict(facecolor='gray',alpha=0.2,lw=0.0))
ax11.set_xticks(np.arange(0,20.5,1.0))
ax11.grid(True)
plt.savefig("RF33-fft.pdf", bbox_inches='tight') | gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/metrics/__init__.py | 27 | 3527 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| mit |
jakobzhao/wbcrawler3 | seg_training_3.py | 1 | 7151 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: [email protected]
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import sys
# gensim modules
from gensim.models import Doc2Vec
from wbcrawler.log import *
import numpy
from pymongo import MongoClient
from wbcrawler.seg import seg_sentence
reload(sys)
sys.setdefaultencoding('utf-8')
project = 'insurance'
address = "localhost"
port = 27017
model = Doc2Vec.load('%s/d2v.bin' % project)
# model = Doc2Vec.load('d2v_sentiment.bin')
# line_count = 10000
line_count = 350
model_size = 200
# print model.docvecs.most_similar(['NEG_0'])
# for w, i in model.most_similar(u'赞'):
# print w + ' ' + str(i)
# pass
train_arrays = numpy.zeros((line_count * 2, model_size))
train_labels = numpy.zeros(line_count * 2)
for i in range(line_count):
prefix_train_pos = 'POS_' + str(i)
prefix_train_neg = 'NEG_' + str(i)
# prefix_train_mid = 'MID_' + str(i)
train_arrays[i] = model.docvecs[prefix_train_pos]
train_arrays[line_count + i] = model.docvecs[prefix_train_neg]
# train_arrays[line_count * 2 + i] = model.docvecs[prefix_train_mid]
train_labels[i] = 1.0
train_labels[line_count + i] = 0
# train_labels[line_count * 2 + i] = 5
# print train_arrays
# classifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
# from wbcrawler.NNet import NeuralNet
# from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
# classifier = RandomForestClassifier(n_estimators=100)
# classifier = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l2', random_state=None, tol=0.0001)
# classifier = GaussianNB()
# classifier = SVC()
# classifier = KNeighborsClassifier()
# classifier = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski', n_neighbors=5, p=2, weights='uniform')
classifier = DecisionTreeClassifier()
# classifier = NeuralNet(50, learn_rate=1e-2)
# classifier = SGDClassifier(loss='log', penalty='l1')
# classifier.fit(train_arrays, train_labels, fine_tune=False, maxiter=500, SGD=True, batch=150, rho=0.9)
classifier.fit(train_arrays, train_labels)
# print train_arrays, train_labels
log(NOTICE, "score: %f" % classifier.score(train_arrays, train_labels))
# from sklearn.manifold import TSNE
# import numpy as np
# import matplotlib.pyplot as plt
#
# ts = TSNE(2)
# reduced_vecs = ts.fit_transform(np.concatenate((train_arrays[0:line_count], train_arrays[line_count:line_count * 2])))
#
# #color points by word group to see if Word2Vec can separate them
# for i in range(line_count * 2):
# if i < len(train_arrays[0:line_count]):
# #food words colored blue
# color = 'b'
# else:
# color = 'g'
# plt.plot(reduced_vecs[i, 1], reduced_vecs[i, 0], marker='o', color=color, markersize=4)
#
# plt.show()
# pred_probas = lr.predict_proba(test_vecs)[:,1]
#
# fpr,tpr,_ = roc_curve(y_test, pred_probas)
# roc_auc = auc(fpr,tpr)
# plt.plot(fpr,tpr,label='area = %.2f' %roc_auc)
# plt.plot([0, 1], [0, 1], 'k--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.legend(loc='lower right')
#
# plt.show()
client = MongoClient(address, port)
db = client[project]
words = []
# search_json = {'$or': [{'keyword': '社会保险'},{'keyword': '社保'}]}
# search_json = {'keyword': '医疗保险'}
# utc_end = datetime.datetime(2015, 10, 26, 0, 0, 0, 0, tzinfo=TZCHINA)
# utc_start = datetime.datetime(2015, 10, 1, 0, 0, 0, 0, tzinfo=TZCHINA)
# search_json = {'$and': [{"timestamp": {"$lt": utc_end}}, {"timestamp": {"$gt": utc_start}}]}
# search_json = {"timestamp": {"$gt": utc_end}}
search_json = {}
posts = db.posts.find(search_json)
count = db.posts.find(search_json).count()
# Round One, parsing those with replies
log(NOTICE, 'Round One')
# adding sentiment is 0 is a temporary strategy. since the number of replies are increasing.
posts = db.posts.find({"replies": {"$ne": []}, "sentiment": {"$eq": 0}})
count = db.posts.find({"replies": {"$ne": []}}).count()
i = 0
for post in posts:
line = seg_sentence(post['content'])
a = model.infer_vector(line.split(u' '))
s_index = classifier.predict(a)
log(NOTICE, '%d %s' % (s_index, post['content'].encode('gbk', 'ignore')))
f_flag = False
# db.posts.update({'mid': post['mid']}, {'$set': {'sentiment': s_index}})
re_count = len(post['replies'])
re_i = 1
for reply in post['replies']:
re = reply['content']
if len(reply['content']) >= 2:
if reply['content'][:2] == u'//':
f_flag = True
if len(reply['content']) >= 4:
if reply['content'][:4] == u'转发微博' or reply['content'][:4] == u'轉發微博' or str(reply['content'][:4]).lower() == u'repo':
f_flag = True
if reply['content'] == '':
f_flag = True
if f_flag is True:
re_index = s_index
else:
line = seg_sentence(reply['content'])
a = model.infer_vector(line.split(u' '))
re_index = classifier.predict(a)
            log(NOTICE, '%d %s' % (re_index, reply['content'].encode('gbk', 'ignore')))
# db.posts.update({'mid': reply['mid']}, {'$set': {'sentiment': re_index}})
# log(NOTICE, 'Reply #%d of post #%d, %d remains. Content: %s' % (re_i, i, count - re_i, re.encode('gbk', 'ignore')))
re_i += 1
# log(NOTICE, '#%d, %d remains. Content: %s' % (i, count - i, a.encode('gbk', 'ignore')))
i += 1
# Round Two, parsing those without replies
log(NOTICE, 'Round Two')
posts = db.posts.find({"replies": {"$eq": []}, "sentiment": {"$eq": 0}})
count = db.posts.find({"replies": {"$ne": []}}).count()
i = 1
for post in posts:
line = seg_sentence(post['content'])
a = model.infer_vector(line.split(u' '))
s_index = classifier.predict(a)
log(NOTICE, '%d %s' % (s_index, post['content'].encode('gbk', 'ignore')))
# db.posts.update({'mid': post['mid']}, {'$set': {'sentiment': s_index}})
# log(NOTICE, '#%d, %d remains. Content: %s' % (i, count - i, a.encode('gbk', 'ignore')))
i += 1
log(NOTICE, 'mission completes.')
# a = model.infer_vector(u'医疗 改革 不错'.split(u' '))
# b = model.infer_vector(u'最 恶毒 不足以 形容'.split(u' '))
# print classifier.score(train_arrays, train_labels)
# # print classifier.predict(train_arrays[130])
# print classifier.predict([a, b, model.docvecs['NEG_2'], model.docvecs['POS_2']])
# for i in range(100):
# print classifier.predict([model.docvecs['NEG_' + str(i)]])
# print "========POS=============="
# for i in range(100):
# print classifier.predict([model.docvecs['POS_' + str(i)]])
# # print classifier.predict_proba([a, b, model.docvecs['NEG_2'], model.docvecs['POS_2']])
| mit |
billy-inn/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
wzbozon/scikit-learn | examples/manifold/plot_lle_digits.py | 59 | 8576 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
                     discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
mhdella/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))  # pi ** (1/4), not pi / 4
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
mahantheshhv/ibis | ibis/tests/test_comms.py | 16 | 11505 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
import pytest
import numpy as np
from ibis.util import guid
from ibis.compat import unittest
try:
import ibis.comms as comms
from ibis.comms import (SharedMmap, IbisType, IbisTableReader,
IbisTableWriter)
SKIP_TESTS = False
except ImportError:
SKIP_TESTS = True
def _nuke(path):
try:
os.remove(path)
except os.error:
pass
pytestmark = pytest.mark.skipif(SKIP_TESTS,
reason='Comms extension disabled')
class TestIPCLock(unittest.TestCase):
def setUp(self):
if sys.platform == 'darwin':
raise unittest.SkipTest
self.timeout = 1
self.master = comms.IPCLock(is_slave=0, lock_timeout_ms=self.timeout)
self.slave = comms.IPCLock(self.master.semaphore_id,
lock_timeout_ms=self.timeout)
def test_acquire_and_release(self):
# It's not our turn
self.assertFalse(self.master.acquire(block=False))
self.slave.acquire()
self.slave.release()
self.assertTrue(self.master.acquire())
def test_cleanup_semaphore_arrays(self):
# Otherwise, there will be too many semaphore arrays floating around
for i in range(500):
comms.IPCLock(is_slave=0)
def test_thread_blocking(self):
lock = threading.Lock()
results = []
# This also verifies that the GIL is correctly dropped
def ping():
while True:
with self.slave:
with lock:
if len(results) == 4:
break
results.append('ping')
def pong():
while True:
with self.master:
with lock:
if len(results) == 4:
break
results.append('pong')
t1 = threading.Thread(target=pong)
t1.start()
t2 = threading.Thread(target=ping)
t2.start()
t1.join()
t2.join()
ex_results = ['ping', 'pong'] * 2
assert results == ex_results
class TestSharedMmap(unittest.TestCase):
def setUp(self):
self.to_nuke = []
def tearDown(self):
for path in self.to_nuke:
_nuke(path)
def test_create_file(self):
size = 1024
path = guid()
try:
mm = SharedMmap(path, size, create=True)
mm.close()
self.assertTrue(os.path.exists(path))
self.assertEqual(os.stat(path).st_size, size)
finally:
_nuke(path)
def test_file_not_exist(self):
path = guid()
self.assertRaises(IOError, SharedMmap, path, 1024)
self.assertRaises(IOError, SharedMmap, path, 1024, offset=20,
create=True)
def test_close_file(self):
path = guid()
self.to_nuke.append(path)
data = guid()
mm = SharedMmap(path, len(data), create=True)
assert mm.closed is False
mm.close()
assert mm.closed is True
# idempotent
mm.close()
assert mm.closed is True
self.assertRaises(IOError, mm.read, 4)
self.assertRaises(IOError, mm.write, 'bazqux')
self.assertRaises(IOError, mm.seek, 0)
self.assertRaises(IOError, mm.flush)
def test_file_interface(self):
path = guid()
self.to_nuke.append(path)
data = guid()
mm = SharedMmap(path, len(data), create=True)
assert mm.tell() == 0
mm.write(data)
assert mm.tell() == len(data)
mm.seek(0)
assert mm.tell() == 0
result = mm.read(16)
assert len(result) == 16
assert result == data[:16]
assert mm.tell() == 16
def test_multiple_mmaps(self):
path = guid()
path2 = guid()
data = guid()
self.to_nuke.extend([path, path2])
mm1 = SharedMmap(path, len(data), create=True)
mm1.write(data)
mm2 = SharedMmap(path, len(data))
result = mm2.read()
self.assertEqual(result, data)
# Open both maps first, see if data synchronizes
mm1 = SharedMmap(path2, len(data), create=True)
mm2 = SharedMmap(path2, len(data))
mm1.write(data)
result = mm2.read()
self.assertEqual(result, data)
def rand_bool(N):
return np.random.randint(0, 2, size=N).astype(np.uint8)
def rand_int_span(dtype, N):
info = np.iinfo(dtype)
lo, hi = info.min, info.max
return np.random.randint(lo, hi, size=N).astype(dtype)
def bool_ex(N):
mask = rand_bool(N)
values = rand_bool(N)
return _to_masked(values, mask, IbisType.BOOLEAN)
def int_ex(N, ibis_type):
mask = rand_bool(N)
nptype = comms._ibis_to_numpy[ibis_type]
values = rand_int_span(nptype, N)
return _to_masked(values, mask, ibis_type)
def double_ex(N):
mask = rand_bool(N)
values = np.random.randn(N)
return _to_masked(values, mask, IbisType.DOUBLE)
def _to_masked(values, mask, dtype):
return comms.masked_from_numpy(values, mask, dtype)
class TestImpalaMaskedFormat(unittest.TestCase):
"""
Check that data makes it to and from the file format, and that it can be
correctly transformed to the appropriate NumPy/pandas/etc. format
"""
N = 1000
def _check_roundtrip(self, columns):
writer = IbisTableWriter(columns)
table_size = writer.total_size()
buf = comms.RAMBuffer(table_size)
writer.write(buf)
buf.seek(0)
reader = IbisTableReader(buf)
for i, expected in enumerate(columns):
result = reader.get_column(i)
assert result.equals(expected)
def test_basic_diverse_table(self):
columns = [
bool_ex(self.N),
int_ex(self.N, IbisType.TINYINT),
int_ex(self.N, IbisType.SMALLINT),
int_ex(self.N, IbisType.INT),
int_ex(self.N, IbisType.BIGINT)
]
self._check_roundtrip(columns)
def test_boolean(self):
col = bool_ex(self.N)
self.assertEqual(col.nbytes(), self.N * 2)
self._check_roundtrip([col])
# Booleans with nulls will come out as object arrays with None for each
# null. This is how pandas handles things
result = col.to_numpy_for_pandas()
assert result.dtype == object
_check_masked_correct(col, result, np.bool_,
lambda x: x is None)
# No nulls, get boolean dtype
mask = np.zeros(self.N, dtype=np.uint8)
values = rand_bool(self.N)
col2 = _to_masked(values, mask, IbisType.BOOLEAN)
result2 = col2.to_numpy_for_pandas()
_check_masked_correct(col2, result2, np.bool_,
lambda x: x is None)
# Get a numpy.ma.MaskedArray
# masked_result = col.to_masked_array()
# didn't copy
# assert not masked_result.flags.owndata
# assert masked_result.base is col
# For each integer type, address conversion back to NumPy rep's: masked
# array, pandas-compatible (nulls force upcast to float + NaN for NULL)
def test_tinyint(self):
col = int_ex(self.N, IbisType.TINYINT)
self.assertEqual(col.nbytes(), self.N * 2)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int8)
_check_pandas_ints_no_nulls(self.N, IbisType.TINYINT)
def test_smallint(self):
col = int_ex(self.N, IbisType.SMALLINT)
self.assertEqual(col.nbytes(), self.N * 3)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int16)
_check_pandas_ints_no_nulls(self.N, IbisType.SMALLINT)
def test_int(self):
col = int_ex(self.N, IbisType.INT)
self.assertEqual(col.nbytes(), self.N * 5)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int32)
_check_pandas_ints_no_nulls(self.N, IbisType.INT)
def test_int_segfault(self):
col = int_ex(1000000, IbisType.INT)
col.to_numpy_for_pandas()
def test_bigint(self):
col = int_ex(self.N, IbisType.BIGINT)
self.assertEqual(col.nbytes(), self.N * 9)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int64)
_check_pandas_ints_no_nulls(self.N, IbisType.BIGINT)
def test_float(self):
mask = rand_bool(self.N)
values = np.random.randn(self.N).astype(np.float32)
col = _to_masked(values, mask, IbisType.FLOAT)
self.assertEqual(col.nbytes(), self.N * 5)
self._check_roundtrip([col])
result = col.to_numpy_for_pandas()
assert result.dtype == np.float32
mask = np.isnan(result)
ex_mask = col.mask().view(np.bool_)
assert np.array_equal(mask, ex_mask)
def test_double(self):
col = double_ex(self.N)
self.assertEqual(col.nbytes(), self.N * 9)
self._check_roundtrip([col])
result = col.to_numpy_for_pandas()
assert result.dtype == np.float64
mask = np.isnan(result)
ex_mask = col.mask().view(np.bool_)
assert np.array_equal(mask, ex_mask)
def test_string_pyobject(self):
# pandas handles strings in object-type (NPY_OBJECT) arrays and uses
# either None or NaN for nulls. For the time being we'll be consistent
# with that
#
pass
def test_timestamp(self):
pass
def test_decimal(self):
pass
def test_multiple_string_columns(self):
# For the time being, string (STRING, VARCHAR, CHAR) columns will all
# share the same intern table
pass
def _check_pandas_ints_nulls(col, dtype):
result = col.to_numpy_for_pandas()
assert result.dtype == np.float64
_check_masked_correct(col, result, dtype, np.isnan)
def _check_pandas_ints_no_nulls(N, ibis_type):
nptype = comms._ibis_to_numpy[ibis_type]
mask = np.zeros(N, dtype=np.uint8)
values = rand_int_span(nptype, N)
col = _to_masked(values, mask, ibis_type)
result = col.to_numpy_for_pandas()
assert result.dtype == nptype
_check_masked_correct(col, result, nptype, lambda x: False)
def _check_masked_correct(col, result, dtype, is_na_f):
mask = col.mask()
data = col.data_bytes().view(dtype)
for i, v in enumerate(result):
if mask[i]:
assert is_na_f(v)
else:
# For comparisons outside representable integer range, this may
# yield incorrect results
assert v == data[i]
class TestTableRoundTrip(unittest.TestCase):
"""
Test things not captured by datatype-specific tests
"""
def test_table_metadata(self):
# Check values from preamble
pass
| apache-2.0 |
MonoCloud/zipline | zipline/finance/controls.py | 11 | 12900 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pandas as pd
from six import with_metaclass
from zipline.errors import (
AccountControlViolation,
TradingControlViolation,
)
class TradingControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Before any order is executed by TradingAlgorithm, this method should be
called *exactly once* on each registered TradingControl object.
If the specified asset and amount do not violate this TradingControl's
restraint given the information in `portfolio`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this TradingControl's constraint, this
method should call self.fail(asset, amount).
"""
raise NotImplementedError
def fail(self, asset, amount, datetime, metadata=None):
"""
Raise a TradingControlViolation with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = repr(self)
if metadata:
constraint = "{constraint} (Metadata: {metadata})".format(
constraint=constraint,
metadata=metadata
)
raise TradingControlViolation(asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
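# --- Illustrative example, not part of the zipline API --------------------
# A minimal control following the protocol documented above: validate() is
# called once per order and either returns None or calls self.fail().  The
# concrete controls below (MaxOrderCount, MaxOrderSize, ...) are the real
# implementations; ExampleNeverOrderAsset only demonstrates the pattern.
class ExampleNeverOrderAsset(TradingControl):
    """Illustrative control that forbids any order for one specific asset."""
    def __init__(self, asset):
        super(ExampleNeverOrderAsset, self).__init__(asset=asset)
        self.asset = asset
    def validate(self,
                 asset,
                 amount,
                 _portfolio,
                 algo_datetime,
                 _algo_current_data):
        """
        Fail on any non-zero order for the forbidden asset.
        """
        if asset == self.asset and amount != 0:
            self.fail(asset, amount, algo_datetime)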
class MaxOrderCount(TradingControl):
"""
TradingControl representing a limit on the number of orders that can be
placed in a given trading day.
"""
def __init__(self, max_count):
super(MaxOrderCount, self).__init__(max_count=max_count)
self.orders_placed = 0
self.max_count = max_count
self.current_date = None
def validate(self,
asset,
amount,
_portfolio,
algo_datetime,
_algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.fail(asset, amount, algo_datetime)
self.orders_placed += 1
class RestrictedListOrder(TradingControl):
"""
TradingControl representing a restricted list of assets that
cannot be ordered by the algorithm.
"""
def __init__(self, restricted_list):
"""
restricted list can be an iterable or a
container (implements __contains__) for dynamic
restrictions.
"""
super(RestrictedListOrder, self).__init__()
self.restricted_list = restricted_list
def validate(self,
asset,
amount,
_portfolio,
_algo_datetime,
_algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if asset in self.restricted_list:
self.fail(asset, amount, _algo_datetime)
class MaxOrderSize(TradingControl):
"""
TradingControl representing a limit on the magnitude of any single order
placed with the given asset. Can be specified by share or by dollar
value.
"""
def __init__(self, asset=None, max_shares=None, max_notional=None):
super(MaxOrderSize, self).__init__(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
portfolio,
_algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.fail(asset, amount, _algo_datetime)
current_asset_price = algo_current_data[asset].price
order_value = amount * current_asset_price
too_much_value = (self.max_notional is not None and
abs(order_value) > self.max_notional)
if too_much_value:
self.fail(asset, amount, _algo_datetime)
class MaxPositionSize(TradingControl):
"""
TradingControl representing a limit on the maximum position size that can
be held by an algo for a given asset.
"""
def __init__(self, asset=None, max_shares=None, max_notional=None):
super(MaxPositionSize, self).__init__(asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.fail(asset, amount, algo_datetime)
current_price = algo_current_data[asset].price
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.fail(asset, amount, algo_datetime)
class LongOnly(TradingControl):
"""
TradingControl representing a prohibition against holding short positions.
"""
def validate(self,
asset,
amount,
portfolio,
_algo_datetime,
_algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.fail(asset, amount, _algo_datetime)
class AssetDateBounds(TradingControl):
"""
TradingControl representing a prohibition against ordering an asset before
its start_date, or after its end_date.
"""
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the algo has passed this Asset's end_date, or before the
Asset's start date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.fail(asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.fail(asset, amount, algo_datetime, metadata=metadata)
class AccountControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
On each call to handle data by TradingAlgorithm, this method should be
called *exactly once* on each registered AccountControl object.
If the check does not violate this AccountControl's restraint given
the information in `portfolio` and `account`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this AccountControl's constraint, this
method should call self.fail().
"""
raise NotImplementedError
def fail(self):
"""
Raise an AccountControlViolation with information about the failure.
"""
raise AccountControlViolation(constraint=repr(self))
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
class MaxLeverage(AccountControl):
"""
AccountControl representing a limit on the maximum leverage allowed
by the algorithm.
"""
def __init__(self, max_leverage):
"""
max_leverage is the gross leverage in decimal form. For example,
        2 limits an algorithm to trading at most double the account value.
"""
super(MaxLeverage, self).__init__(max_leverage=max_leverage)
self.max_leverage = max_leverage
if max_leverage is None:
raise ValueError(
"Must supply max_leverage"
)
if max_leverage < 0:
raise ValueError(
"max_leverage must be positive"
)
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account['leverage'] > self.max_leverage:
self.fail()
| apache-2.0 |
isaaclino/wattsup | wattsup.py | 1 | 8060 | #!/usr/bin/env python
"""record data from WattsUp power meter
Reads data from a Watts Up PRO or compatible power meter (http://www.wattsupmeters.com).
Plots in real time, can run in simulation mode, reading from a file rather than
a physical power meter.
Output format will be space separated, containing:
YYYY-MM-DD HH:MM:SS.ssssss n W V A
where n is sample number, W is power in watts, V volts, A current in amps
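An illustrative line in this format (made-up but self-consistent values):
2011-09-20 09:14:29.123456 10 95.3 120.1 0.794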
Usage: "wattsup.py -h" for options
Author: Kelsey Jordahl
Copyright: Kelsey Jordahl 2011
License: GPLv3
Time-stamp: <Tue Sep 20 09:14:29 EDT 2011>
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version. A copy of the GPL
version 3 license can be found in the file COPYING or at
<http://www.gnu.org/licenses/>.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import os, serial
import datetime, time
import argparse
import curses
from platform import uname
import numpy as np
import matplotlib.pyplot as plt
EXTERNAL_MODE = 'E'
INTERNAL_MODE = 'I'
TCPIP_MODE = 'T'
FULLHANDLING = 2
class WattsUp(object):
def __init__(self, port, interval):
if args.sim:
self.s = open(port,'r') # not a serial port, but a file
else:
self.s = serial.Serial(port, 115200 )
self.logfile = None
self.interval = interval
# initialize lists for keeping data
self.t = []
self.power = []
self.potential = []
self.current = []
def mode(self, runmode):
if args.sim:
return # can't set run mode while in simulation
self.s.write('#L,W,3,%s,,%d;' % (runmode, self.interval) )
if runmode == INTERNAL_MODE:
self.s.write('#O,W,1,%d' % FULLHANDLING)
def fetch(self):
if args.sim:
return # can't fetch while in simulation
for line in self.s:
if line.startswith( '#d' ):
fields = line.split(',')
W = float(fields[3]) / 10;
V = float(fields[4]) / 10;
A = float(fields[5]) / 1000;
print datetime.datetime.now(), W, V, A
def log(self, logfile = None):
print 'Logging...'
if not args.sim:
self.mode(EXTERNAL_MODE)
if logfile:
self.logfile = logfile
o = open(self.logfile,'w')
if args.raw:
rawfile = '.'.join([os.path.splitext(self.logfile)[0],'raw'])
try:
r = open(rawfile,'w')
except:
print 'Opening raw file %s failed!' % rawfile
args.raw = False
line = self.s.readline()
n = 0
# set up curses
screen = curses.initscr()
curses.noecho()
curses.cbreak()
screen.nodelay(1)
try:
curses.curs_set(0)
except:
pass
if args.plot:
fig = plt.figure()
while True:
if args.sim:
time.sleep(self.interval)
if line.startswith( '#d' ):
if args.raw:
r.write(line)
fields = line.split(',')
if len(fields)>5:
W = float(fields[3]) / 10;
V = float(fields[4]) / 10;
A = float(fields[5]) / 1000;
screen.clear()
screen.addstr(2, 4, 'Logging to file %s' % self.logfile)
screen.addstr(4, 4, 'Time: %d s' % n)
screen.addstr(5, 4, 'Power: %3.1f W' % W)
screen.addstr(6, 4, 'Voltage: %5.1f V' % V)
if A<1000:
screen.addstr(7, 4, 'Current: %d mA' % int(A*1000))
else:
screen.addstr(7, 4, 'Current: %3.3f A' % A)
screen.addstr(9, 4, 'Press "q" to quit ')
#if args.debug:
# screen.addstr(12, 0, line)
screen.refresh()
c = screen.getch()
if c in (ord('q'), ord('Q')):
break # Exit the while()
if args.plot:
self.t.append(float(n))
self.power.append(W)
self.potential.append(V)
self.current.append(A)
fig.clear()
plt.plot(np.array(self.t)/60,np.array(self.power),'r')
ax = plt.gca()
ax.set_xlabel('Time (minutes)')
ax.set_ylabel('Power (W)')
# show the plot
fig.canvas.draw()
if self.logfile:
o.write('%s %d %3.1f %3.1f %5.3f\n' % (datetime.datetime.now(), n, W, V, A))
n += self.interval
line = self.s.readline()
curses.nocbreak()
curses.echo()
curses.endwin()
try:
o.close()
except:
pass
if args.raw:
try:
r.close()
except:
pass
def main(args):
if not args.port:
system = uname()[0]
if system == 'Darwin': # Mac OS X
args.port = '/dev/tty.usbserial-A1000wT3'
elif system == 'Linux':
args.port = '/dev/ttyUSB0'
if not os.path.exists(args.port):
if not args.sim:
print ''
print 'Serial port %s does not exist.' % args.port
print 'Please make sure FDTI drivers are installed'
print ' (http://www.ftdichip.com/Drivers/VCP.htm)'
print 'Default ports are /dev/ttyUSB0 for Linux'
print ' and /dev/tty.usbserial-A1000wT3 for Mac OS X'
exit()
else:
print ''
print 'File %s does not exist.' % args.port
meter = WattsUp(args.port, args.interval)
if args.log:
meter.log(args.outfile)
if args.fetch:
print 'WARNING: Fetch mode not working!!!!'
meter.fetch()
if args.internal:
meter.mode(INTERNAL_MODE)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Get data from Watts Up power meter.')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose')
parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='debugging output')
parser.add_argument('-m', '--simulation-mode', dest='sim', action='store_true', help='simulate logging by reading serial data from disk with delay of sample interval between lines')
parser.add_argument('-i', '--internal-mode', dest='internal', action='store_true', help='Set meter to internal logging mode')
parser.add_argument('-f', '--fetch', dest='fetch', action='store_true', help='Fetch data stored on the meter (NOT YET WORKING!)')
parser.add_argument('-g', '--graphics-mode', dest='plot', action='store_true', help='Graphical output: plot the data in real time')
parser.add_argument('-l', '--log', dest='log', action='store_true', help='log data in real time')
parser.add_argument('-r', '--raw', dest='raw', action='store_true', help='output raw file')
parser.add_argument('-o', '--outfile', dest='outfile', default='log.out', help='Output file')
parser.add_argument('-s', '--sample-interval', dest='interval', default=1.0, type=float, help='Sample interval (default 1 s)')
parser.add_argument('-p', '--port', dest='port', default=None, help='USB serial port')
args = parser.parse_args()
main(args)
| gpl-3.0 |
vsoch/myconnectome | myconnectome/qa/qa_summary.py | 2 | 4284 | # -*- coding: utf-8 -*-
"""
assemble qa data for myconnectome data paper
"""
import os,glob
import pickle
import pandas as pd
import numpy
qadatadir='/Users/poldrack/Dropbox/data/selftracking/QA'
# process anat data
if 0:
anatdatafile=os.path.join(qadatadir,'anat_qa.pkl')
anatdata=pickle.load(open(anatdatafile,'rb'))
anatdata['anat_fwhm']
subcode=anatdata.pop('subcode')
s=pd.DataFrame(anatdata)
corr_anat=pd.read_csv('/Users/poldrack/code/myconnectome/quality_assurance/corr_anat.csv')
asdf
# process func data
funcdata={}
corr_func=pd.read_csv('/Users/poldrack/code/myconnectome/quality_assurance/corr_func.csv')
funcfiles=glob.glob(os.path.join(qadatadir,'rsfmri/*pkl'))
funcfiles.sort()
# from http://preprocessed-connectomes-project.github.io/quality-assessment-protocol/index.html
#Entropy Focus Criterion [func_efc]: Uses the Shannon entropy of voxel intensities as an indication of ghosting and blurring induced by head motion, lower is better 2. Uses mean functional.
#Foreground to Background Energy Ratio [func_fber]: Mean energy of image values (i.e., mean of squares) within the head relative to outside the head, higher values are better. Uses mean functional.
#Smoothness of Voxels [func_fwhm]: The full-width half maximum (FWHM) of the spatial distribution of the image intensity values in units of voxels, lower values are better. Uses mean functional.
#Ghost to Signal Ratio (GSR) [func_gsr]: A measure of the mean signal in the ‘ghost’ image (signal present outside the brain due to acquisition in the phase encoding direction) relative to mean signal within the brain, lower values are better. Uses mean functional.
#
#Standardized DVARS [func_dvars]: The spatial standard deviation of the temporal derivative of the data, normalized by the temporal standard deviation and temporal autocorrelation, lower values are better 56. Uses functional time-series.
#Outlier Detection [func_outlier]: The mean fraction of outliers found in each volume using 3dTout command in AFNI (http://afni.nimh.nih.gov/afni), lower values are better 7. Uses functional time-series.
#Median Distance Index [func_quality]: The mean distance (1 – spearman’s rho) between each time-point’s volume and the median volume using AFNI’s 3dTqual command (http://afni.nimh.nih.gov/afni), lower values are better 7. Uses functional time-series.
#Mean Fractional Displacement - Jenkinson [func_mean_fd]: A measure of subject head motion, which compares the motion between the current and previous volumes. This is calculated by summing the absolute value of displacement changes in the x, y and z directions and rotational changes about those three axes. The rotational changes are given distance values based on the changes across the surface of a 80mm radius sphere, lower values are better 89. Uses functional time-series.
#Number of volumes with FD greater than 0.2mm [func_num_fd]: Lower values are better Uses functional time-series.
#Percent of volumes with FD greater than 0.2mm [func_perc_fd]: Lower values are better Uses functional time-series.
#
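# Illustrative sketch only (not from the original pipeline): given a per-volume
# framewise-displacement series fd_series (a numpy array, in mm), the FD
# summaries described above would be computed roughly as
#   mean_fd = fd_series.mean()
#   num_fd = (fd_series > 0.2).sum()
#   perc_fd = 100.0 * num_fd / len(fd_series)  # cf. the 518-volume runs below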
gsr=[]
fber=[]
snr=[]
efc=[]
fwhm=[]
mean_fd=[]
pct_fd=[]
num_fd=[]
quality=[]
tsnr=[]
exclude_bad_subs=True
subcodes=[i.strip() for i in open('/Users/poldrack/code/myconnectome/rsfmri_analyses/subcodes.txt').readlines()]
for f in funcfiles:
subcode=f.split('/')[-1].split('_')[0]
if exclude_bad_subs and not subcode in subcodes:
continue
funcdata[subcode]=pickle.load(open(f,'rb'))
gsr.append(funcdata[subcode]['gsr'])
fber.append(funcdata[subcode]['fber'])
snr.append(funcdata[subcode]['snr'])
tsnr.append(funcdata[subcode]['tsnr'])
fwhm.append(funcdata[subcode]['fwhm'][3])
efc.append(funcdata[subcode]['efc'])
quality.append(funcdata[subcode]['func_quality'])
mean_fd.append(funcdata[subcode]['fd'])
pct_fd.append(100.0*(funcdata[subcode]['num_fd']/518.0))
funcvars={'func_gsr':gsr,'func_fber':fber,'func_snr':snr,'func_tsnr':tsnr,'func_fwhm':fwhm,'func_efc':efc,'func_mean_fd':mean_fd,'func_perc_fd':pct_fd,'func_quality':quality}
myc=pd.DataFrame(funcvars)
if exclude_bad_subs:
myc.to_csv('myconnectome_func_qa_goodsubs.csv')
else:
myc.to_csv('myconnectome_func_qa_allsubs.csv')
| mit |
asymmetry/beampackage | beampackage/bpmcalib.py | 2 | 17801 | #!/usr/bin/env python
import os,re,numpy
from harppos import *
from bpmfit import *
from signalfilter import decode,getrealpos
from runinfo import *
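#illustrative sketch (not used by the class below): first-order inversion of the stripline
#model assumed in calibrategxgy, X+/X- = g*(1+2u/ar)/(1-2u/ar), which gives
#u ~ (ar/2)*(X+ - g*X-)/(X+ + g*X-); argument names and defaults here are examples only
def firstorder_bpm_position(vplus,vminus,gain=1.0,ar=34.925):
    return 0.5*ar*(vplus-gain*vminus)/(vplus+gain*vminus)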
#class to calibrate bpm
class bpmcalib:
def __init__(self,keywords=False,treename="T",rootpath=os.getenv("REPLAY_OUT_PATH"), onlyb=False,forcefastbus=False,forceredecode=False,ab=False):
self.keywords=keywords
self.treename=treename
self.rootpath=rootpath
self.pklpath=getpklpath(rootpath)
self.onlyb=onlyb
self.forcefastbus=forcefastbus
self.forceredecode=forceredecode
self.period=runinfo()
if not ab:
self.ab=[1] if self.onlyb else [0,1]
else:self.ab=ab
self.chan=[2,3,0,1]
self.bpmraw,self.constfile={},{}
if self.keywords:
self.filtertype="s"
self.getharpinfo()
self.getsurvey()
self.getcalibconf()
self.gethardpos()
#get harp information
def getharpinfo(self):
harpdata=harpinfo()
tmp=harpdata.getdata(self.keywords)
if not tmp:raise Exception("no harp data found")
if not self.onlyb:self.peak_04=[tmp[i]["harp04"] for i in range(tmp["ndata"])]
self.peak_05=[tmp[i]["harp05"] for i in range(tmp["ndata"])]
self.run=[tmp[i]["run"][0] for i in range(tmp["ndata"])]
self.pedrun=tmp["pedrun"][0]
try:self.availruns={"a":tmp["availa"],"b":tmp["availb"]}
except:self.availruns=False
print "calibrate bpm with run",self.run,"and pedestal run %i,"%self.pedrun,"keywords: ",self.keywords
#get survey information
def getsurvey(self,run=False):
if not run:run=self.run[0]
#get position in bpm for harp data
self.harp=harppos(run)
self.bpm={"a":bpmpos(run,"a"),"b":bpmpos(run,"b")}
self.currepics=self.period.current(run)
    #get calibration configuration
def getcalibconf(self):
self.calibconf=calibsetting(self.keywords)
self.datanum=self.calibconf.datanum
#get position at bpm from harp data
def gethardpos(self):
pos1,pos2,posbpma,posbpmb=[],[],[],[]
for i in range(len(self.peak_05)):
if self.onlyb:
bpmaposhall=self.bpm["a"].posbpm2hall(self.calposfromraw(self.run[i],0))
pos1.append([numpy.mean(x) for x in bpmaposhall])
else:pos1.append(self.harp.getpos_04(self.peak_04[i]))
pos2.append(self.harp.getpos_05(self.peak_05[i]))
if not self.onlyb:
posbpma.append(self.bpm["a"].getpos_bpm(pos1[i],pos2[i]))
posbpmb.append(self.bpm["b"].getpos_bpm(pos1[i],pos2[i]))
else:
#print self.bpm["b"].
posbpmb.append(self.harp.getpos_05_local(self.peak_05[i]))
hardpos=[posbpma,posbpmb]
r=map(lambda p:p[0]**2+p[1]**2,posbpmb)
self.centerid=r.index(min(r))
self.hardpos=[]
for i in self.ab:
self.hardpos.append(hardpos[i])
#print out
print "hard position is:"
for i in range(len(self.hardpos[0])):
print self.run[i],
for j in range(len(self.hardpos)):
for p in self.hardpos[j][i]:
print "%1.2f,"%p,
print "\t",
print
#get bpm calibration constant, used for calibrating bpm B only with A and harp info
def getcalibconst(self,ab,run=False):
if not run:run=self.run[0]
tmp=self.period.bpmconstread(run,self.forcefastbus)[ab]
if not tmp:
print "can not find const for run %i"%run
return False
#pedestal for run, read from pedestal.pkl from database
pedtmp=False
if not self.period.ifautogain(run):
pedtmp=self.period.pedestal(run,self.forcefastbus)[ab]
if not pedtmp:pedtmp=tmp["ped"]
pedestal=map(lambda a,b:a+b,pedtmp,tmp["offset"])
calconst=tmp["const"]
fitorder=tmp["fitorder"]
self.constfile[ab]=tmp["constfile"]
return pedestal,calconst,fitorder
#calculate position from raw data,ab=0 for bpm a ,1 for b
def calposfromraw(self,run,ab,rotate=False):
self.getrawdata(run)
ab="a" if ab==0 else "b"
tmp=self.getcalibconst(ab)
if not tmp:return False,False
ped,const,fitorder=tmp
raw=self.bpmraw[run][:4] if ab=="a" else self.bpmraw[run][4:]
raw=[raw[c]-ped[c] for c in range(4)]
x,y=getrealpos(raw,const,fitorder)
x,y=x[x>-100],y[y>-100]
x,y=x[x<100],y[y<100]
if rotate:x,y,z=self.bpm[ab].posbpmrotate([x,y])
return x,y
#get raw data
def getrawdata(self,run,ped=False,eventcut=False):
if run in self.bpmraw.keys():return
bpmrawpkl=self.pklpath.getpath("raw","%sbpm"%self.filtertype,run)
currpkl=self.pklpath.getpath("raw","curr",run)
availpkl=self.pklpath.getpath("raw","bpmavail",run)
if not os.path.exists(bpmrawpkl):
runpath=checkrunavail(self.rootpath,run)
if not runpath:raise Exception("no file found for run %i"%run)
d=decode(runpath,self.treename,forcefastbus=self.forcefastbus,forceredecode=self.forceredecode)
d.autodecode()
raw=zload(bpmrawpkl)
#ped or signal cut
if ped:
curr=zload(currpkl)
#get average curr
            nocurr=0.01 #currents below this value are treated as no beam signal
currshift=500
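            #bpmavail marks samples where the beam is off both now and currshift samples
            #earlier, so only stable no-beam stretches contribute to the pedestal peak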
curr=curr<nocurr
curr1=numpy.concatenate((numpy.zeros(currshift),curr[:-currshift]))
bpmavail=curr*curr1
else:
bpmavail=zload(availpkl)
#event cut
if not eventcut:
ecut=getbpmeventcut()
eventcut=ecut.getcut(run,self.forcefastbus)
#filter the unwanted event
if eventcut:
if (len(bpmavail)-eventcut[1])>=0:
cut=numpy.asarray([0]*eventcut[0]+[1]*(eventcut[1]-eventcut[0])\
+[0]*(len(bpmavail)-eventcut[1]),dtype=numpy.int32)
else:
cut=numpy.asarray([0]*eventcut[0]+[1]*(len(bpmavail)-eventcut[0])\
,dtype=numpy.int32)
raw=[x+bpmavail*1e6+cut*1e6-2e6 for x in raw]
else:raw=[x+bpmavail*1e6-1e6 for x in raw]
raw=[x[x>-1e4] for x in raw]
self.bpmraw[run]=raw
#get center point
def getcenterpoint(self,pos):
r=map(lambda p:p[0]**2+p[1]**2,pos)
return r.index(min(r))
    #get bpm raw peak and calibration configuration
def bpmpeak(self):
for r in self.run:self.getrawdata(r)
#ped peaks and offset
if self.calibconf.pedpeaks:
self.pedpeaks=self.calibconf.pedpeaks
else:
pedtmp=self.period.pedestal(self.run[0],self.forcefastbus)
if pedtmp["a"] and pedtmp["b"]:
self.pedpeaks=pedtmp["a"]+pedtmp["b"]
else:
self.getrawdata(self.pedrun,True)
self.pedpeaks=[numpy.mean(r) for r in self.bpmraw[self.pedrun]]
if self.calibconf.offset:self.offset=self.calibconf.offset
else:self.offset=[0]*8
self.peaks=map(lambda r:[numpy.asarray([numpy.mean(x)]*self.datanum,dtype=numpy.float32) for x in self.bpmraw[r]],self.run)
if self.calibconf.gxy:self.gxy=self.calibconf.gxy
else:self.gxy=[False,False,False,False]
#calibrate gx and gy
def calibrategxgy(self,pos,peak,ped,offset):
index=self.getcenterpoint(pos)
ar=self.ar
purepeak=map(lambda p1,p2,p0:map(lambda p3:map(lambda p4,p5,p6:p4-p5-p6,p3,p2,p0),p1),peak,ped,offset)
gx=map(lambda p1,p2:p1[0]*(1-2/ar*p2[0])/(p1[1]*(1+2/ar*p2[0])),purepeak[0],pos)
gy=map(lambda p1,p2:p1[0]*(1-2/ar*p2[1])/(p1[1]*(1+2/ar*p2[1])),purepeak[1],pos)
return gx[index],gy[index]
#calibrate one bpm
def calibrateone(self,gxy,pos,peak,ped,offset):
#purepeak:1st level:x,y;2nd level:n pos;3rd level:x+,x-
#pos:1st level:n pos;2nd level:x,y
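        #e.g. purepeak==[[[xplus1,xminus1],[xplus2,xminus2],...],[[yplus1,yminus1],...]]
        #and pos==[[x1,y1],[x2,y2],...]; the names here are illustrative only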
purepeak=map(lambda p1,p2,p0:map(lambda p3:map(lambda p4,p5,p6:p4-p5-p6,p3,p2,p0),p1),peak,ped,offset)
xdiff_sum=map(lambda p:(p[0]-gxy[0]*p[1])/(p[0]+gxy[0]*p[1]),purepeak[0])
ydiff_sum=map(lambda p:(p[0]-gxy[1]*p[1])/(p[0]+gxy[1]*p[1]),purepeak[1])
xbyb2=map(lambda p1,p2:p1**2+p2**2,xdiff_sum,ydiff_sum)
xb2x=map(lambda p:1/p-1/numpy.sqrt(p)*numpy.sqrt(1/p-1),xbyb2)
xdata=map(lambda p1,p2:self.ar*p1*p2,xdiff_sum,xb2x)
ydata=map(lambda p1,p2:self.ar*p1*p2,ydiff_sum,xb2x)
xharp=map(lambda p:p[0],pos)
yharp=map(lambda p:p[1],pos)
#filternan
nanxdata=[all(x) for x in numpy.isnan(xdata)]
nanydata=[all(x) for x in numpy.isnan(ydata)]
nanxy=nanxdata or nanydata
for i in range(len(nanxy)-1,-1,-1):
if nanxy[i]:
del xdata[i],ydata[i],xharp[i],yharp[i]
#fit
centerid=self.getcenterpoint(pos)
xfit=bpmfit(self.keywords,0,xharp,(xdata,ydata),centerid)
px,pxerr,pxval=xfit.fit()
yfit=bpmfit(self.keywords,1,yharp,(ydata,xdata),centerid)
py,pyerr,pyval=yfit.fit()
return px,py,pxerr,pyerr,pxval,pyval
#calibrate
def calibrate(self,rawconst=False):
self.ar=34.925
self.cx,self.cy,self.ex,self.ey,self.px,self.py=[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]
self.bpmpeak()
#read const input
if rawconst:
if "offset" in rawconst.keys():
for i in range(len(self.ab)):
for j in range(4):
self.pedpeaks[self.ab[i]*4+j]=rawconst["ped"][i*4+j]
self.offset[self.ab[i]*4+j]=rawconst["offset"][i*4+j]
if "gxgy" in rawconst.keys():
for i in range(len(self.ab)):
for j in range(2):
self.gxy[self.ab[i]*2+j]=rawconst["gxgy"][i*2+j]
#calibrate
for i in range(len(self.ab)):
peak=([],[])
xchan=self.chan[0]+4*self.ab[i]
ychan=self.chan[2]+4*self.ab[i]
for j in range(len(self.hardpos[i])):
peak[0].append(self.peaks[j][xchan:xchan+2])
peak[1].append(self.peaks[j][ychan:ychan+2])
ped=(self.pedpeaks[xchan:xchan+2],self.pedpeaks[ychan:ychan+2])
print "-----------------------------------------------------------------------------------------"
offset=(self.offset[xchan:xchan+2],self.offset[ychan:ychan+2])
#get gxy
gxychan=self.ab[i]*2
if not self.gxy[gxychan]:
self.gxy[gxychan],self.gxy[gxychan+1]=\
self.calibrategxgy(self.hardpos[i],peak,ped,offset)
#calibrate a,b,c
self.cx[self.ab[i]],self.cy[self.ab[i]],self.ex[self.ab[i]],self.ey[self.ab[i]],self.px[self.ab[i]],self.py[self.ab[i]]=self.calibrateone(self.gxy[gxychan:gxychan+2],self.hardpos[i],peak,ped,offset)
    #save const for a or b, used by the constsave function
def __constsaveone(self,ab):
dbdir=os.getenv("BEAMDBPATH")
if dbdir==None:
print "please define BEAMDBPATH in your env"
return False
pydb=os.path.join(dbdir,"pyDB")
if not os.path.exists(pydb):os.makedirs(pydb)
run=sorted(self.run)
if not self.period.ifhapavail(run[0]) or self.forcefastbus:fastbus=True
else:fastbus=False
#save const
if fastbus:
filename=os.path.join(pydb,"bpm%sfb_%i.dat"%(ab,run[0]))
else:
filename=os.path.join(pydb,"bpm%s_%i.dat"%(ab,run[0]))
if self.availruns:runperiod=self.availruns[ab]
else:
runperiod=""
for r in run:runperiod+="%i,"%r
runperiod=runperiod[:-1]
fitorder=self.calibconf.fitorder
datfile=open(filename,"w")
        datfile.write("All of the survey info comes directly from survey data, please read the survey report to get the details about the coordinate system\n")
        datfile.write("Please contact pengjia immediately if you have any questions (email, gtalk, phone...)\n")
datfile.write("keywords: ")
for keyword in self.keywords:
datfile.write("%s "%keyword)
if "nA" in keyword:curravail=keyword[:-2]
datfile.write("\n\n")
datfile.write("------------------------------------------------\n\n")
datfile.write("avail run period:%s\n"%runperiod)
try:datfile.write("avail curr(nA):%i\n"%(int(self.currepics)))
except:datfile.write("avail curr(nA):%s\n"%(curravail))
datfile.write("target z position(mm,support multi):-14.135 0 14.135 -10.81 -13.6271\n")
ped=self.pedpeaks[:4] if ab=="a" else self.pedpeaks[4:]
offset=self.offset[:4] if ab=="a" else self.offset[4:]
datfile.write("pedestal peak:%f %f %f %f\n"%tuple(ped))
datfile.write("offset:%f %f %f %f\n"%tuple(offset))
abnum=0 if ab=="a" else 1
datfile.write("bpm%s ar,gx,gy:%.15f %.15f %.15f\n"%(ab,self.ar,self.gxy[abnum*2],self.gxy[abnum*2+1]))
datfile.write("fitorder:%i %i\n"%(fitorder[0],fitorder[1]))
cxy=[self.cx[abnum],self.cy[abnum]]
exy=[self.ex[abnum],self.ey[abnum]]
for i in range(2):
xy="x" if i==0 else "y"
datfile.write("bpm%s %s a,b,c:"%(ab,xy))
for j in range(len(cxy[i])):
datfile.write("%.15f "%cxy[i][j])
datfile.write("\n")
#for i in range(2):
#xy="x" if i==0 else "y"
#datfile.write("bpm%s %s para error:"%(ab,xy))
#for j in range(len(exy[i])):
#datfile.write("%.15f "%exy[i][j])
#datfile.write("\n")
datfile.write("fval:%.7f %.7f"%(self.px[abnum],self.py[abnum]))
datfile.write("\n")
datfile.close()
#print constant
print "\n\n-----------------------------------------"
for line in open(filename,"r"):print line.strip()
print "-----------------------------------------\n\n"
#save constant
def constsave(self):
dbdir=os.getenv("BEAMDBPATH")
if not self.onlyb:self.__constsaveone("a")
self.__constsaveone("b")
#check calibration constant
def calibcheck(self):
try:
from pylab import savefig,figure
from matplotlib.colors import LogNorm
from matplotlib import pyplot
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
except:
print "sorry the matplotlib package is needed for plotting!"
return
fig =figure(figsize=(5.0*len(self.ab), 5.0), dpi=100)
axes=[]
majorLocator= MultipleLocator(1)
minorLocator= MultipleLocator(0.2)
for i in range(len(self.ab)):
axes.append(fig.add_subplot(1,len(self.ab),i+1))
axes[i].clear()
xall,yall=numpy.zeros(0,dtype=numpy.float32),numpy.zeros(0,dtype=numpy.float32)
for r in self.run:
x,y=self.calposfromraw(r,self.ab[i])
xall=numpy.concatenate((xall,x))
yall=numpy.concatenate((yall,y))
xymax=max([abs(min([xall.min(),yall.min()])),abs(max([xall.max(),yall.max()]))])*1.2
histrange=[[-xymax,xymax],[-xymax,xymax]]
axes[i].hist2d(xall,yall,bins=300,range=histrange,norm=LogNorm())
#harp pos
hardpos=[[x[0] for x in self.hardpos[i]],[x[1] for x in self.hardpos[i]]]
axes[i].plot(hardpos[0],hardpos[1],"+",markersize=50.,fillstyle="none")
axes[i].xaxis.set_major_locator(majorLocator)
axes[i].yaxis.set_major_locator(majorLocator)
axes[i].xaxis.set_minor_locator(minorLocator)
axes[i].yaxis.set_minor_locator(minorLocator)
axes[i].set_xlabel("u(mm)")
axes[i].set_ylabel("v(mm)")
try:
fig.suptitle("%inA,using %s"%(self.curr,self.constfile["b"]))
construn=re.split("[_.]",self.constfile["b"])[1]
savefig("pic/points%i_%inA_%s.eps"%(sorted(self.run)[0],self.curr,construn))
except:
savefig("pic/points%i.eps"%(sorted(self.run)[0]))
def ovalfun(self,x,a,b,c):
        #parameters: a=center, b=radius, c=amplitude (peak height of the half-ellipse)
return c*numpy.sqrt(1-(x-a)*(x-a)/(b*b))
#check calibration constant by using slow raster
def calibcheck_raster(self,run):
try:
from pylab import savefig,figure
from matplotlib.colors import LogNorm
from matplotlib import pyplot,mlab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.patches import Ellipse
from scipy.optimize import curve_fit
except:
print "sorry the matplotlib package is needed for plotting!"
return
#if not self.keywords:
self.run=[run]
self.filtertype="f"
self.getsurvey(run)
fig =figure(figsize=(5.0*len(self.ab), 5.0), dpi=100)
axes=[]
for i in range(2):
tmp=self.calposfromraw(run,i,rotate=True)
if not tmp:continue
x,y=tmp
axes.append(fig.add_subplot(121+i))
axes[i].clear()
center=[numpy.mean(x),numpy.mean(y)]
xyrange=[x.max()-x.min(),y.max()-y.min()]
xymin=min([x.min(),y.min()])
xymax=max([x.max(),y.max()])
histrange=[[xymin,xymax],[xymin,xymax]]
axislable=int(xymax/10.)*2
if axislable<1:axislable=1
majorLocator= MultipleLocator(axislable)
minorLocator= MultipleLocator(axislable/5.)
axes[i].hist2d(x,y,bins=300,range=histrange,norm=LogNorm())
axes[i].xaxis.set_major_locator(majorLocator)
axes[i].yaxis.set_major_locator(majorLocator)
axes[i].xaxis.set_minor_locator(minorLocator)
axes[i].yaxis.set_minor_locator(minorLocator)
try:
fig.suptitle("%inA,using %s"%(self.curr,self.constfile["b"]))
construn=re.split("[_.]",self.constfile["b"])[1]
savefig("pic/calibcheck%i_%inA_%s.png"%(run,self.curr,construn))
except:
savefig("pic/calibcheck%i.png"%(run))
| gpl-3.0 |
vrkrishn/FBHacks | src/Common/get_videos.py | 1 | 2664 | import os
import sys
import requests
import urllib2
import json
import datetime
from dateutil import parser
from collections import Counter
import matplotlib.pyplot as plt
def request_video(videoid, access_token):
endpoint = 'https://graph.facebook.com/v2.8'
#access_token='EAACEdEose0cBAHFPMoobVMkxxWagM9i1SrJY1JKEdLd0xOaT5GqCOLnrFcT4ZA0v0Lr9wxrJKwRCHZBiZApuE7pVjpbIQR6lFJkJCVJ8WdMkfdmr1kPbKgf1GnT0oOvAgMbjP5DGy3Q06W8vSOt87CC8gZCgOLGiqZCoZApxkitQZDZD'
fb_graph_url = endpoint+ '/' + videoid + '?fields=comments,length,scheduled_publish_time,created_time,backdated_time&limit=1000&access_token='+access_token
resp = requests.get(fb_graph_url)
data = json.dumps(resp.json(), sort_keys=True, indent=4, separators=(',',': '))
print data
d = json.loads(data)
print d.keys()
print d['created_time']
duration = d['length']
resp=requests.get(d['comments']['paging']['next'])
print resp.json()
data = json.dumps(resp.json(), sort_keys=True, indent=4, separators=(',',': '))
d = json.loads(data)
comment_data = []
while('next' in d['paging']):
resp=requests.get(d['paging']['next'].replace('&limit=25','&limit=500'))
data = json.dumps(resp.json(), sort_keys=True, indent=4, separators=(',',': '))
for comment in resp.json()['data']:
# print comment
comment_data.append((parser.parse(comment['created_time'], ignoretz=True), comment['message']))
d = json.loads(data)
# with open("result.json", 'w') as f:
# f.write(comment_data)
comment_data = sorted(comment_data, key=lambda comment: comment[0])
print comment_data
initial_time = comment_data[0][0]
end_time = comment_data[-1][0]
counts = []
features = {}
features['comments'] = {}
features['intervalSize'] = 30
countZeros = 0
maxC = 0
print int(duration)
print initial_time
final_time = initial_time + datetime.timedelta(0,int(duration))
seconds = 0
while (initial_time < final_time):
next_time = initial_time + datetime.timedelta(0,30)
print next_time
print initial_time + datetime.timedelta(0,int(duration))
count = [x[1] for x in comment_data if (x[0] > initial_time and x[0] < next_time)]
if not count:
counts.append(count)
countZeros = 0
else:
countZeros += 1
        if len(count) > maxC:
            maxC = len(count)
features['comments'][str(seconds * 30)] = { 'count': len(count), 'comment_data': count }
seconds += 1
#print next_time
initial_time = next_time
with open('data.json', 'w') as outfile:
json.dump(features, outfile)
k = 10
#print json.dumps(resp.json(), sort_keys=True, indent=4, separators=(',',': '))
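#sketch (hypothetical helper, not called above): the 30-second bucketing that the
#while loop in request_video approximates, written as a standalone function
def bucket_comments(comments, start, interval_seconds=30):
    #comments: list of (datetime, message) tuples sorted by time
    buckets = {}
    for ts, msg in comments:
        idx = int((ts - start).total_seconds() // interval_seconds)
        buckets.setdefault(idx, []).append(msg)
    return buckets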
def main(args):
request_video(args[1], args[2])
return
if __name__=='__main__':
sys.exit(main(sys.argv)) | mit |
meren/anvio | anvio/learning.py | 2 | 4623 | # -*- coding: utf-8
# pylint: disable=line-too-long
"""A simple module with classes for learning operations"""
import pickle
import numpy as np
import anvio
import anvio.utils as utils
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from anvio.errors import ConfigError
with terminal.SuppressAllOutput():
import sklearn.ensemble
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
class RF:
def __init__(self, classifier_object_path="rf.classifier", r=run, p=progress):
self.run = r
self.progress = p
self.classifier_object_path = classifier_object_path
self.classifier_initialized = False
self.classifier = None
self.features = None
self.classes = None
def train(self, features, data, labels, n_trees=64):
self.run.info('RF Train', "%d observations with %d features grouped into %d classes." % (len(data), len(features), len(set(labels))))
filesnpaths.is_output_file_writable(self.classifier_object_path)
self.progress.new('Training')
self.progress.update('...')
rf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_trees)
rf.fit(np.array(data), labels)
self.progress.end()
pickle.dump({'features': features, 'classes': rf.classes_, 'classifier': rf}, open(self.classifier_object_path, 'wb'))
self.run.info('Classifier output', self.classifier_object_path)
def predict_from_TAB_delimited_file(self, file_path):
cols = utils.get_columns_of_TAB_delim_file(file_path)
return self.predict(utils.get_TAB_delimited_file_as_dictionary(file_path, column_mapping=[str] + [float] * len(cols)))
def predict(self, data_dict):
if not self.classifier_initialized:
self.initialize_classifier()
samples = list(data_dict.keys())
self.run.info('Num samples to classify', "%d." % (len(samples)))
data = []
for sample in samples:
datum = []
for feature in self.features:
if feature not in data_dict[sample]:
                    raise ConfigError("RF prediction ran into an issue. All features described in the classifier should be present "
"for all observations in the data. However, that is not the case. For instance, feature "
"'%s' is in the classifier, but the entry '%s' in the input data does not have an observation "
"for it :/ Not good." % (feature, sample))
datum.append(data_dict[sample][feature])
data.append(datum)
predictions = self.classifier.predict_proba(data)
predictions_dict = {}
for i in range(0, len(samples)):
sample = samples[i]
predictions_dict[sample] = {}
for j in range(0, len(self.classes)):
_class = self.classes[j]
predictions_dict[sample][_class] = predictions[i][j]
return predictions_dict
def initialize_classifier(self):
filesnpaths.is_file_exists(self.classifier_object_path)
try:
if anvio.DEBUG:
classifier_obj = pickle.load(open(self.classifier_object_path, 'rb'))
else:
with terminal.SuppressAllOutput():
classifier_obj = pickle.load(open(self.classifier_object_path, 'rb'))
except UnicodeDecodeError:
            raise ConfigError("Your classifier object is broken. Probably because you generated it using a different version "
"of anvi'o. Please create a new one from scratch, and you will probably be golden.")
try:
self.features = classifier_obj['features']
self.classes = classifier_obj['classes']
self.classifier = classifier_obj['classifier']
except:
raise ConfigError("RF class does not like the classifier object it was sent for processing :/ Are you sure you "
"generated it the way you were supposed to?")
self.classifier_initialized = True
self.run.info('Random Forest Classifier', "Initialized with %d features grouped into %d classes." % (len(self.features), len(self.classes)))
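# minimal usage sketch (hypothetical toy data and file path, not part of anvi'o itself):
if __name__ == "__main__":
    import os
    import tempfile
    toy_classifier = os.path.join(tempfile.mkdtemp(), "toy.classifier")
    rf = RF(classifier_object_path=toy_classifier)
    rf.train(features=["f1", "f2"],
             data=[[0.1, 1.0], [0.2, 0.9], [0.9, 0.1], [0.8, 0.2]],
             labels=["low", "low", "high", "high"],
             n_trees=8)
    print(rf.predict({"obs_1": {"f1": 0.15, "f2": 0.95}}))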
| gpl-3.0 |
AdaptivePELE/AdaptivePELE | AdaptivePELE/analysis/writeNetworkFiles.py | 1 | 3771 | import os
import sys
import argparse
from AdaptivePELE.utilities import utilities
import matplotlib.pyplot as plt
try:
# This might fail for older versions of matplotlib (e.g in life cluster)
plt.style.use("ggplot")
except:
pass
def parseArguments():
desc = "Write the information related to the conformation network to file\n"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("clusteringObject", type=str, help="Path to the clustering object")
parser.add_argument("suffix", type=str, help="Suffix to append to file names")
parser.add_argument("metricCol", type=int, help="Column of the metric of interest")
parser.add_argument("-o", type=str, default=None, help="Output path where to write the files")
parser.add_argument("-c", "--cond", type=str, default="min", help="Condition on the metric optimality, options are max or min")
parser.add_argument("-b", "--bindEn", type=int, default=None, help="Column of the binding energy in the report file")
args = parser.parse_args()
return args.clusteringObject, args.suffix, args.metricCol, args.o, args.cond, args.bindEn
if __name__ == "__main__":
clusteringObject, suffix, metricCol, outputPath, metricOptimization, bindingEnergy = parseArguments()
if outputPath is not None:
outputPath = os.path.join(outputPath, "")
if not os.path.exists(outputPath):
os.makedirs(outputPath)
else:
outputPath = ""
sys.stderr.write("Reading clustering object...\n")
cl = utilities.readClusteringObject(clusteringObject)
if cl.conformationNetwork is None:
sys.exit("Clustering object loaded has no conformation network!!")
conf = cl.conformationNetwork
optimalCluster = cl.getOptimalMetric(metricCol, simulationType=metricOptimization)
pathway = conf.createPathwayToCluster(optimalCluster)
if not os.path.exists(outputPath+"conformationNetwork%s.edgelist" % suffix):
sys.stderr.write("Writing conformation network...\n")
conf.writeConformationNetwork(outputPath+"conformationNetwork%s.edgelist" % suffix)
if not os.path.exists(outputPath+"FDT%s.edgelist" % suffix):
sys.stderr.write("Writing FDT...\n")
conf.writeFDT(outputPath+"FDT%s.edgelist" % suffix)
if not os.path.exists(outputPath+"pathwayFDT%s.pdb" % suffix):
sys.stderr.write("Writing pathway to optimal cluster...\n")
# cl.writePathwayOptimalCluster(outputPath+"pathwayFDT%s.pdb" % suffix)
cl.writePathwayTrajectory(pathway, outputPath+"pathwayFDT%s.pdb" % suffix)
if not os.path.exists(outputPath+"nodesPopulation%s.txt" % suffix):
sys.stderr.write("Writing nodes population...\n")
cl.writeConformationNodePopulation(outputPath+"nodesPopulation%s.txt" % suffix)
if not os.path.exists(outputPath+"nodesMetric%s.txt" % suffix):
sys.stderr.write("Writing nodes metrics...\n")
cl.writeClusterMetric(outputPath+"nodesMetric%s.txt" % suffix, metricCol)
if bindingEnergy is not None:
plt.figure()
plt.plot(pathway, [cl.clusters.clusters[i].getMetricFromColumn(bindingEnergy) for i in pathway])
plt.xlabel("Cluster number")
plt.ylabel("Binding energy(kcal/mol)")
plt.savefig(outputPath+"bindingEnergy_%s.png" % suffix)
plt.figure()
plt.plot(pathway, [cl.clusters.clusters[i].contacts for i in pathway])
plt.xlabel("Cluster number")
plt.ylabel("Contacts ratio")
plt.savefig(outputPath+"contacts_%s.png" % suffix)
plt.figure()
plt.plot(pathway, [cl.clusters.clusters[i].getMetricFromColumn(3) for i in pathway])
plt.xlabel("Cluster number")
plt.ylabel("Energy(kcal/mol)")
plt.savefig(outputPath+"totalEnergy_%s.png" % suffix)
plt.show()
| mit |
changkun/AugmentedTouch | src/clf/main.py | 2 | 4251 | import dataLoader as dl
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn.lda import LDA
def pca(data, target):
centers = [[1, 1], [-1, -1], [1, -1]]
X = data
yy=[]
for i, l in enumerate(target):
if l == 'left':
yy.append(1);
else:
yy.append(0);
y = np.array(yy)
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
#data = dl.nor(dl.loadData('datafew/'))
#
#test1 = dl.nor(dl.loadData('datalarge/training/'))
#
#test2 = dl.nor(dl.loadData('datalarge/test/'))
#
#
#dl.trainingSVC(test2, data, 0.00001, 2)
#dl.plot2DataSet(test2, test1, 1)
def acc_image(training_data, tarining_label, test_data, test_label):
n_train = training_data.shape[0] # samples for training
n_test = test_data.shape[0] # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 5 # maximum number of features
step = 1 # step size for the calculation
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = training_data[:,0:n_features], tarining_label
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = test_data[:,0:n_features], test_label
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
path0 = 'datafew/'
path1 = 'datalarge/training/'
path2 = 'datalarge/test/'
data, label = dl.loadDataStr(path2)
yy=[]
for i, l in enumerate(label):
if l == 'left':
yy.append(1);
else:
yy.append(0);
y = np.array(yy)
test_data, test_label = dl.loadDataStr(path2)
dimension = 3
clf = svm.SVC(kernel='linear').fit(data[:,0:dimension], label)
print clf
print clf.support_vectors_.shape
print clf.n_support_
error_count = 0.0
result = clf.predict(test_data[:,0:dimension])
for i, l in enumerate(result):
#print l, label[i]
if l != test_label[i]:
error_count+=1
print 'error_count: ' + str(error_count)
print 'total_count: ' + str(result.shape[0])
#print result
#print test_label
print 'error_rate: ' + str(error_count/result.shape[0])
#pca(data[:,0:3], label)
#acc_image(data, label, test_data, test_label)
| gpl-2.0 |
JsNoNo/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
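    A shape-only sketch with ``input_type="string"`` (each token gets an implied
    value of 1, as described above):
    >>> h = FeatureHasher(n_features=8, input_type="string")
    >>> f = h.transform([["dog", "cat", "cat"], ["run"]])
    >>> f.shape
    (2, 8)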
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
gfyoung/pandas | pandas/tests/scalar/period/test_period.py | 1 | 54909 | from datetime import date, datetime, timedelta
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT, period as libperiod
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat import np_datetime64_compat
import pandas as pd
from pandas import NaT, Period, Timedelta, Timestamp, offsets
import pandas._testing as tm
class TestPeriodConstruction:
def test_construction(self):
i1 = Period("1/1/2005", freq="M")
i2 = Period("Jan 2005")
assert i1 == i2
i1 = Period("2005", freq="A")
i2 = Period("2005")
i3 = Period("2005", freq="a")
assert i1 == i2
assert i1 == i3
i4 = Period("2005", freq="M")
i5 = Period("2005", freq="m")
assert i1 != i4
assert i4 == i5
i1 = Period.now("Q")
i2 = Period(datetime.now(), freq="Q")
i3 = Period.now("q")
assert i1 == i2
assert i1 == i3
i1 = Period("1982", freq="min")
i2 = Period("1982", freq="MIN")
assert i1 == i2
i1 = Period(year=2005, month=3, day=1, freq="D")
i2 = Period("3/1/2005", freq="D")
assert i1 == i2
i3 = Period(year=2005, month=3, day=1, freq="d")
assert i1 == i3
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
msg = "Must supply freq for ordinal value"
with pytest.raises(ValueError, match=msg):
Period(ordinal=200701)
msg = "Invalid frequency: X"
with pytest.raises(ValueError, match=msg):
Period("2007-1-1", freq="X")
# GH#34703 tuple freq disallowed
with pytest.raises(TypeError, match="pass as a string instead"):
Period("1982", freq=("Min", 1))
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period("3/10/12", freq="B")
i2 = Period("3/10/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/11/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/12/12", freq="D")
assert i1 == i2.asfreq("B")
i3 = Period("3/10/12", freq="b")
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq="B")
i2 = Period("3/12/12", freq="B")
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq="Q")
i2 = Period("1/1/2005", freq="Q")
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq="Q")
i2 = Period("9/1/2005", freq="Q")
assert i1 == i2
i1 = Period("2005Q1")
i2 = Period(year=2005, quarter=1, freq="Q")
i3 = Period("2005q1")
assert i1 == i2
assert i1 == i3
i1 = Period("05Q1")
assert i1 == i2
lower = Period("05q1")
assert i1 == lower
i1 = Period("1Q2005")
assert i1 == i2
lower = Period("1q2005")
assert i1 == lower
i1 = Period("1Q05")
assert i1 == i2
lower = Period("1q05")
assert i1 == lower
i1 = Period("4Q1984")
assert i1.year == 1984
lower = Period("4q1984")
assert i1 == lower
def test_construction_month(self):
expected = Period("2007-01", freq="M")
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period(200701, freq="M")
assert i1 == expected
i1 = Period(ordinal=200701, freq="M")
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
"1/1/2005", freq="M"
)
assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="A")
assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
"3/10/12", freq="B"
)
assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
assert Period(
year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=1, freq="Q")
assert Period(
year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=2, freq="Q")
assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
year=2005, month=3, day=1, freq="D"
)
assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
year=2012, month=3, day=10, freq="B"
)
expected = Period("2005-03-01", freq="3D")
assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
assert Period(year=2005, month=3, day=1, freq="3D") == expected
assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
year=2012, month=3, day=10, freq="3B"
)
assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq="M")
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
def test_invalid_arguments(self):
msg = "Must supply freq for datetime value"
with pytest.raises(ValueError, match=msg):
Period(datetime.now())
with pytest.raises(ValueError, match=msg):
Period(datetime.now().date())
msg = "Value must be Period, string, integer, or datetime"
with pytest.raises(ValueError, match=msg):
Period(1.6, freq="D")
msg = "Ordinal must be an integer"
with pytest.raises(ValueError, match=msg):
Period(ordinal=1.6, freq="D")
msg = "Only value or ordinal but not both should be given but not both"
with pytest.raises(ValueError, match=msg):
Period(ordinal=2, value=1, freq="D")
msg = "If value is None, freq cannot be None"
with pytest.raises(ValueError, match=msg):
Period(month=1)
msg = "Given date string not likely a datetime"
with pytest.raises(ValueError, match=msg):
Period("-2000", "A")
msg = "day is out of range for month"
with pytest.raises(DateParseError, match=msg):
Period("0", "A")
msg = "Unknown datetime string format, unable to parse"
with pytest.raises(DateParseError, match=msg):
Period("1/1/-2000", "A")
def test_constructor_corner(self):
expected = Period("2007-01", freq="2M")
assert Period(year=2007, month=1, freq="2M") == expected
assert Period(None) is NaT
p = Period("2007-01-01", freq="D")
result = Period(p, freq="A")
exp = Period("2007", freq="A")
assert result == exp
def test_constructor_infer_freq(self):
p = Period("2007-01-01")
assert p.freq == "D"
p = Period("2007-01-01 07")
assert p.freq == "H"
p = Period("2007-01-01 07:10")
assert p.freq == "T"
p = Period("2007-01-01 07:10:15")
assert p.freq == "S"
p = Period("2007-01-01 07:10:15.123")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123000")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123400")
assert p.freq == "U"
def test_multiples(self):
result1 = Period("1989", freq="2A")
result2 = Period("1989", freq="A")
assert result1.ordinal == result2.ordinal
assert result1.freqstr == "2A-DEC"
assert result2.freqstr == "A-DEC"
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_quarterly(self, month):
# bugs in scikits.timeseries
freq = f"Q-{month}"
exp = Period("1989Q3", freq=freq)
assert "1989Q3" in str(exp)
stamp = exp.to_timestamp("D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp("3D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_annual(self, month):
# bugs in scikits.timeseries
freq = f"A-{month}"
exp = Period("1989", freq=freq)
stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("num", range(10, 17))
def test_period_cons_weekly(self, num, day):
daystr = f"2011-02-{num}"
freq = f"W-{day}"
result = Period(daystr, freq=freq)
expected = Period(daystr, freq="D").asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = Period("2011-01", freq="M")
res = Period._from_ordinal(p.ordinal, freq="M")
assert p == res
assert isinstance(res, Period)
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_construct_from_nat_string_and_freq(self, freq):
per = Period("NaT", freq=freq)
assert per is NaT
per = Period("NaT", freq="2" + freq)
assert per is NaT
per = Period("NaT", freq="3" + freq)
assert per is NaT
def test_period_cons_nat(self):
p = Period("nat", freq="W-SUN")
assert p is NaT
p = Period(iNaT, freq="D")
assert p is NaT
p = Period(iNaT, freq="3D")
assert p is NaT
p = Period(iNaT, freq="1D1H")
assert p is NaT
p = Period("NaT")
assert p is NaT
p = Period(iNaT)
assert p is NaT
def test_period_cons_mult(self):
p1 = Period("2011-01", freq="3M")
p2 = Period("2011-01", freq="M")
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == "3M"
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == "M"
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
msg = "Frequency must be positive, because it represents span: -3M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-3M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0M")
def test_period_cons_combined(self):
p = [
(
Period("2011-01", freq="1D1H"),
Period("2011-01", freq="1H1D"),
Period("2011-01", freq="H"),
),
(
Period(ordinal=1, freq="1D1H"),
Period(ordinal=1, freq="1H1D"),
Period(ordinal=1, freq="H"),
),
]
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == "25H"
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == "25H"
assert p3.freq == offsets.Hour()
assert p3.freqstr == "H"
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
msg = "Frequency must be positive, because it represents span: -25H"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1H1D")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1H1D")
msg = "Frequency must be positive, because it represents span: 0D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0D0H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="0D0H")
# You can only combine together day and intraday offsets
msg = "Invalid frequency: 1W1D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1W1D")
msg = "Invalid frequency: 1D1W"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
@pytest.mark.parametrize("day", ["1970/01/01 ", "2020-12-31 ", "1981/09/13 "])
@pytest.mark.parametrize("hour", ["00:00:00", "00:00:01", "23:59:59", "12:00:59"])
@pytest.mark.parametrize(
"sec_float, expected",
[
(".000000001", 1),
(".000000999", 999),
(".123456789", 789),
(".999999999", 999),
],
)
def test_period_constructor_nanosecond(self, day, hour, sec_float, expected):
# GH 34621
assert Period(day + hour + sec_float).start_time.nanosecond == expected
@pytest.mark.parametrize("hour", range(24))
def test_period_large_ordinal(self, hour):
# Issue #36430
# Integer overflow for Period over the maximum timestamp
p = Period(ordinal=2562048 + hour, freq="1H")
assert p.hour == hour
class TestPeriodMethods:
def test_round_trip(self):
p = Period("2000Q1")
new_p = tm.round_trip_pickle(p)
assert new_p == p
def test_hash(self):
assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))
# --------------------------------------------------------------
# to_timestamp
@pytest.mark.parametrize("tzstr", ["Europe/Brussels", "Asia/Tokyo", "US/Pacific"])
def test_to_timestamp_tz_arg(self, tzstr):
# GH#34522 tz kwarg deprecated
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="A", tz=tzstr)
exp = Timestamp("31/12/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
exp_zone = pytz.timezone(tzstr).normalize(p)
assert p == exp
assert p.tz == exp_zone.tzinfo
assert p.tz == exp.tz
@pytest.mark.parametrize(
"tzstr",
["dateutil/Europe/Brussels", "dateutil/Asia/Tokyo", "dateutil/US/Pacific"],
)
def test_to_timestamp_tz_arg_dateutil(self, tzstr):
tz = maybe_get_tz(tzstr)
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz)
exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
assert p == exp
assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
assert p.tz == exp.tz
def test_to_timestamp_tz_arg_dateutil_from_string(self):
with tm.assert_produces_warning(FutureWarning):
p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels")
assert p.tz == dateutil_gettz("Europe/Brussels")
def test_to_timestamp_mult(self):
p = Period("2011-01", freq="M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
p = Period("2011-01", freq="3M")
assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
assert p.to_timestamp(how="E") == expected
def test_to_timestamp(self):
p = Period("1982", freq="A")
start_ts = p.to_timestamp(how="S")
aliases = ["s", "StarT", "BEGIn"]
for a in aliases:
assert start_ts == p.to_timestamp("D", how=a)
# freq with mult should not affect to the result
assert start_ts == p.to_timestamp("3D", how=a)
end_ts = p.to_timestamp(how="E")
aliases = ["e", "end", "FINIsH"]
for a in aliases:
assert end_ts == p.to_timestamp("D", how=a)
assert end_ts == p.to_timestamp("3D", how=a)
from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "S"]
def _ex(p):
if p.freq == "B":
return p.start_time + Timedelta(days=1, nanoseconds=-1)
return Timestamp((p + p.freq).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period("1982", freq=fcode)
result = p.to_timestamp().to_period(fcode)
assert result == p
assert p.start_time == p.to_timestamp(how="S")
assert p.end_time == _ex(p)
# Frequency other than daily
p = Period("1985", freq="A")
result = p.to_timestamp("H", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("3H", how="end")
assert result == expected
result = p.to_timestamp("T", how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
result = p.to_timestamp("2T", how="end")
assert result == expected
result = p.to_timestamp(how="end")
expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
assert result == expected
expected = datetime(1985, 1, 1)
result = p.to_timestamp("H", how="start")
assert result == expected
result = p.to_timestamp("T", how="start")
assert result == expected
result = p.to_timestamp("S", how="start")
assert result == expected
result = p.to_timestamp("3H", how="start")
assert result == expected
result = p.to_timestamp("5S", how="start")
assert result == expected
def test_to_timestamp_business_end(self):
per = Period("1990-01-05", "B") # Friday
result = per.to_timestamp("B", how="E")
expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize(
"ts, expected",
[
("1970-01-01 00:00:00", 0),
("1970-01-01 00:00:00.000001", 1),
("1970-01-01 00:00:00.00001", 10),
("1970-01-01 00:00:00.499", 499000),
("1999-12-31 23:59:59.999", 999000),
("1999-12-31 23:59:59.999999", 999999),
("2050-12-31 23:59:59.5", 500000),
("2050-12-31 23:59:59.500001", 500001),
("2050-12-31 23:59:59.123456", 123456),
],
)
@pytest.mark.parametrize("freq", [None, "us", "ns"])
def test_to_timestamp_microsecond(self, ts, expected, freq):
# GH 24444
result = Period(ts).to_timestamp(freq=freq).microsecond
assert result == expected
# --------------------------------------------------------------
# Rendering: __repr__, strftime, etc
def test_repr(self):
p = Period("Jan-2000")
assert "2000-01" in repr(p)
p = Period("2000-12-15")
assert "2000-12-15" in repr(p)
def test_repr_nat(self):
p = Period("nat", freq="M")
assert repr(NaT) in repr(p)
def test_millisecond_repr(self):
p = Period("2000-01-01 12:15:02.123")
assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"
def test_microsecond_repr(self):
p = Period("2000-01-01 12:15:02.123567")
assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"
def test_strftime(self):
# GH#3363
p = Period("2000-1-1 12:34:12", freq="S")
res = p.strftime("%Y-%m-%d %H:%M:%S")
assert res == "2000-01-01 12:34:12"
assert isinstance(res, str)
class TestPeriodProperties:
"""Test properties such as year, month, weekday, etc...."""
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_is_leap_year(self, freq):
# GH 13727
p = Period("2000-01-01 00:00:00", freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period("1999-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
p = Period("2004-01-01 00:00:00", freq=freq)
assert p.is_leap_year
p = Period("2100-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="M")
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_freq_str(self):
i1 = Period("1982", freq="Min")
assert i1.freq == offsets.Minute()
assert i1.freqstr == "T"
def test_period_deprecated_freq(self):
cases = {
"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"],
}
msg = INVALID_FREQ_ERR_MSG
for exp, freqs in cases.items():
for freq in freqs:
with pytest.raises(ValueError, match=msg):
Period("2016-03-01 09:00", freq=freq)
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq=freq)
# check supported freq-aliases still works
p1 = Period("2016-03-01 09:00", freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
def _period_constructor(bound, offset):
return Period(
year=bound.year,
month=bound.month,
day=bound.day,
hour=bound.hour,
minute=bound.minute,
second=bound.second + offset,
freq="us",
)
@pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
@pytest.mark.parametrize("period_property", ["start_time", "end_time"])
    def test_outer_bounds_start_and_end_time(self, bound, offset, period_property):
# GH #13346
period = TestPeriodProperties._period_constructor(bound, offset)
with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond"):
getattr(period, period_property)
@pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
@pytest.mark.parametrize("period_property", ["start_time", "end_time"])
def test_inner_bounds_start_and_end_time(self, bound, offset, period_property):
# GH #13346
period = TestPeriodProperties._period_constructor(bound, -offset)
expected = period.to_timestamp().round(freq="S")
assert getattr(period, period_property).round(freq="S") == expected
expected = (bound - offset * Timedelta(1, unit="S")).floor("S")
assert getattr(period, period_property).floor("S") == expected
def test_start_time(self):
freq_lst = ["A", "Q", "M", "D", "H", "T", "S"]
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period("2012", freq=f)
assert p.start_time == xp
assert Period("2012", freq="B").start_time == datetime(2012, 1, 2)
assert Period("2012", freq="W").start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period("2012", freq="A")
def _ex(*args):
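            # helper: one nanosecond before the given datetime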
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="Q")
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period("2012", freq="M")
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="D")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period("2012", freq="H")
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="B")
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period("2012", freq="W")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period("2012", freq="15D")
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period("2012", freq="1D1H")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="1H1D")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_end_time_business_friday(self):
# GH#34449
per = Period("1990-01-05", "B")
result = per.end_time
expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
assert result == expected
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period("2013-1-1", "W-SAT")
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq="A", year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq="M", year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq="W", year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq="B", year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
d_date = Period(freq="D", year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq="H", year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq="2H", year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert (
Period(freq="H", year=2012, month=2, day=1, hour=0).days_in_month == 29
)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert (
Period(freq="D", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
== 29
)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert (
Period(
freq="Min", year=2012, month=2, day=1, hour=0, minute=0, second=0
).days_in_month
== 29
)
class TestPeriodField:
def test_get_period_field_array_raises_on_out_of_range(self):
msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'"
with pytest.raises(ValueError, match=msg):
libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestPeriodComparisons:
def test_comparison_same_period_different_object(self):
# Separate Period objects for the same period
left = Period("2000-01", "M")
right = Period("2000-01", "M")
assert left == right
assert left >= right
assert left <= right
assert not left < right
assert not left > right
def test_comparison_same_freq(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
assert not jan == feb
assert jan != feb
assert jan < feb
assert jan <= feb
assert not jan > feb
assert not jan >= feb
def test_comparison_mismatched_freq(self):
jan = Period("2000-01", "M")
day = Period("2012-01-01", "D")
assert not jan == day
assert jan != day
msg = r"Input has different freq=D from Period\(freq=M\)"
with pytest.raises(IncompatibleFrequency, match=msg):
jan < day
with pytest.raises(IncompatibleFrequency, match=msg):
jan <= day
with pytest.raises(IncompatibleFrequency, match=msg):
jan > day
with pytest.raises(IncompatibleFrequency, match=msg):
jan >= day
def test_comparison_invalid_type(self):
jan = Period("2000-01", "M")
assert not jan == 1
assert jan != 1
int_or_per = "'(Period|int)'"
msg = f"not supported between instances of {int_or_per} and {int_or_per}"
for left, right in [(jan, 1), (1, jan)]:
with pytest.raises(TypeError, match=msg):
left > right
with pytest.raises(TypeError, match=msg):
left >= right
with pytest.raises(TypeError, match=msg):
left < right
with pytest.raises(TypeError, match=msg):
left <= right
def test_sort_periods(self):
jan = Period("2000-01", "M")
feb = Period("2000-02", "M")
mar = Period("2000-03", "M")
periods = [mar, jan, feb]
correctPeriods = [jan, feb, mar]
assert sorted(periods) == correctPeriods
def test_period_cmp_nat(self):
p = Period("2011-01-01", freq="D")
t = Timestamp("2011-01-01")
        # confirm Period('NaT') behaves identically to Timestamp('NaT')
for left, right in [
(NaT, p),
(p, NaT),
(NaT, t),
(t, NaT),
]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestArithmetic:
def test_sub_delta(self):
left, right = Period("2011", freq="A"), Period("2007", freq="A")
result = left - right
assert result == 4 * right.freq
msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
left - Period("2007-01", freq="M")
def test_add_integer(self):
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
assert per1 + 1 == per2
assert 1 + per1 == per2
def test_add_sub_nat(self):
# GH#13071
p = Period("2011-01", freq="M")
assert p + NaT is NaT
assert NaT + p is NaT
assert p - NaT is NaT
assert NaT - p is NaT
def test_add_invalid(self):
# GH#4731
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
msg = "|".join(
[
r"unsupported operand type\(s\)",
"can only concatenate str",
"must be str, not Period",
]
)
with pytest.raises(TypeError, match=msg):
per1 + "str"
with pytest.raises(TypeError, match=msg):
"str" + per1
with pytest.raises(TypeError, match=msg):
per1 + per2
boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
ids = ["identity", "Series", "Index"]
@pytest.mark.parametrize("lbox", boxes, ids=ids)
@pytest.mark.parametrize("rbox", boxes, ids=ids)
def test_add_timestamp_raises(self, rbox, lbox):
# GH#17983
ts = Timestamp("2017")
per = Period("2017", freq="M")
# We may get a different message depending on which class raises
# the error.
msg = "|".join(
[
"cannot add",
"unsupported operand",
"can only operate on a",
"incompatible type",
"ufunc add cannot use operands",
]
)
with pytest.raises(TypeError, match=msg):
lbox(ts) + rbox(per)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(ts)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(per)
def test_sub(self):
per1 = Period("2011-01-01", freq="D")
per2 = Period("2011-01-15", freq="D")
off = per1.freq
assert per1 - per2 == -14 * off
assert per2 - per1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
per1 - Period("2011-02", freq="M")
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1 = Period("19910905", freq=tick_classes(n))
p2 = Period("19920406", freq=tick_classes(n))
expected = Period(str(p2), freq=p2.freq.base) - Period(
str(p1), freq=p1.freq.base
)
assert (p2 - p1) == expected
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(offsets.YearEnd, "month"),
(offsets.QuarterEnd, "startingMonth"),
(offsets.MonthEnd, None),
(offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
assert (p2 - p1) == expected
def test_add_offset(self):
# freq is DateOffset
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
exp = Period("2013", freq=freq)
assert p + offsets.YearEnd(2) == exp
assert offsets.YearEnd(2) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
exp = Period("2011-05", freq=freq)
assert p + offsets.MonthEnd(2) == exp
assert offsets.MonthEnd(2) + p == exp
exp = Period("2012-03", freq=freq)
assert p + offsets.MonthEnd(12) == exp
assert offsets.MonthEnd(12) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
exp = Period("2011-04-06", freq=freq)
assert p + offsets.Day(5) == exp
assert offsets.Day(5) + p == exp
exp = Period("2011-04-02", freq=freq)
assert p + offsets.Hour(24) == exp
assert offsets.Hour(24) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + np.timedelta64(2, "D") == exp
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
np.timedelta64(2, "D") + p
exp = Period("2011-04-02", freq=freq)
assert p + np.timedelta64(3600 * 24, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600 * 24, "s") + p
exp = Period("2011-03-30", freq=freq)
assert p + timedelta(-2) == exp
assert timedelta(-2) + p == exp
exp = Period("2011-04-03", freq=freq)
assert p + timedelta(hours=48) == exp
assert timedelta(hours=48) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
exp = Period("2011-04-03 09:00", freq=freq)
assert p + offsets.Day(2) == exp
assert offsets.Day(2) + p == exp
exp = Period("2011-04-01 12:00", freq=freq)
assert p + offsets.Hour(3) == exp
assert offsets.Hour(3) + p == exp
msg = "cannot use operands with types"
exp = Period("2011-04-01 12:00", freq=freq)
assert p + np.timedelta64(3, "h") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3, "h") + p
exp = Period("2011-04-01 10:00", freq=freq)
assert p + np.timedelta64(3600, "s") == exp
with pytest.raises(TypeError, match=msg):
np.timedelta64(3600, "s") + p
exp = Period("2011-04-01 11:00", freq=freq)
assert p + timedelta(minutes=120) == exp
assert timedelta(minutes=120) + p == exp
exp = Period("2011-04-05 12:00", freq=freq)
assert p + timedelta(days=4, minutes=180) == exp
assert timedelta(days=4, minutes=180) + p == exp
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
msg = "Input has different freq|Input cannot be converted to Period"
with pytest.raises(IncompatibleFrequency, match=msg):
p + o
if isinstance(o, np.timedelta64):
msg = "cannot use operands with types"
with pytest.raises(TypeError, match=msg):
o + p
else:
msg = "|".join(
[
"Input has different freq",
"Input cannot be converted to Period",
]
)
with pytest.raises(IncompatibleFrequency, match=msg):
o + p
def test_sub_offset(self):
# freq is DateOffset
msg = "Input has different freq|Input cannot be converted to Period"
for freq in ["A", "2A", "3A"]:
p = Period("2011", freq=freq)
assert p - offsets.YearEnd(2) == Period("2009", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["M", "2M", "3M"]:
p = Period("2011-03", freq=freq)
assert p - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
assert p - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("2011-04-01", freq=freq)
assert p - offsets.Day(5) == Period("2011-03-27", freq=freq)
assert p - offsets.Hour(24) == Period("2011-03-31", freq=freq)
assert p - np.timedelta64(2, "D") == Period("2011-03-30", freq=freq)
assert p - np.timedelta64(3600 * 24, "s") == Period("2011-03-31", freq=freq)
assert p - timedelta(-2) == Period("2011-04-03", freq=freq)
assert p - timedelta(hours=48) == Period("2011-03-30", freq=freq)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
for freq in ["H", "2H", "3H"]:
p = Period("2011-04-01 09:00", freq=freq)
assert p - offsets.Day(2) == Period("2011-03-30 09:00", freq=freq)
assert p - offsets.Hour(3) == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3, "h") == Period("2011-04-01 06:00", freq=freq)
assert p - np.timedelta64(3600, "s") == Period(
"2011-04-01 08:00", freq=freq
)
assert p - timedelta(minutes=120) == Period("2011-04-01 07:00", freq=freq)
assert p - timedelta(days=4, minutes=180) == Period(
"2011-03-28 06:00", freq=freq
)
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
with pytest.raises(IncompatibleFrequency, match=msg):
p - o
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_period_addsub_nat(self, freq):
per = Period("2011-01", freq=freq)
# For subtraction, NaT is treated as another Period object
assert NaT - per is NaT
assert per - NaT is NaT
# For addition, NaT is treated as offset-like
assert NaT + per is NaT
assert per + NaT is NaT
def test_period_ops_offset(self):
p = Period("2011-04-01", freq="D")
result = p + offsets.Day()
exp = Period("2011-04-02", freq="D")
assert result == exp
result = p - offsets.Day(2)
exp = Period("2011-03-30", freq="D")
assert result == exp
msg = r"Input cannot be converted to Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
p + offsets.Hour(2)
with pytest.raises(IncompatibleFrequency, match=msg):
p - offsets.Hour(2)
def test_period_immutable():
# see gh-17116
msg = "not writable"
per = Period("2014Q1")
with pytest.raises(AttributeError, match=msg):
per.ordinal = 14
freq = per.freq
with pytest.raises(AttributeError, match=msg):
per.freq = 2 * freq
def test_small_year_parsing():
per1 = Period("0001-01-07", "D")
assert per1.year == 1
assert per1.day == 7
def test_negone_ordinals():
freqs = ["A", "M", "Q", "D", "H", "T", "S"]
period = Period(ordinal=-1, freq="D")
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
assert period.year == 1969
period = Period(ordinal=-1, freq="B")
repr(period)
period = Period(ordinal=-1, freq="W")
repr(period)
def test_invalid_frequency_error_message():
msg = "Invalid frequency: <WeekOfMonth: week=0, weekday=0>"
with pytest.raises(ValueError, match=msg):
Period("2012-01-02", freq="WOM-1MON")
| bsd-3-clause |
lukasdiem/warlight2-map-generator | visualize.py | 1 | 1329 | import time
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
from scipy.spatial._plotutils import voronoi_plot_2d
from wl2generator.voronoi_graph import VoronoiGraph
from wl2generator.voronoi_map import create_map
ts = time.time()
vg = VoronoiGraph(dimensions=(1024, 768), granularity=3000)
vg_pts = vg.relax_points(times=2)
vor = vg.vor
map_dict = create_map(vg, (4, 6), (3, 7), (50, 100))
print('Map creation took: {:.4f}s'.format(time.time() - ts))
mapper = cm.ScalarMappable(norm=Normalize(0, max(map_dict['continent_idx']) + 1), cmap='Set3')
axes = plt.gca()
voronoi_plot_2d(vor, axes, show_points=False, show_vertices=False, s=1)
for idx, pt in enumerate(vor.points):
region = vor.regions[vor.point_region[idx]]
continent_idx = map_dict['continent_idx'][idx][0]
country_idx = map_dict['country_idx'][idx][0]
if continent_idx > 0 and -1 not in region:
polygon = [vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=mapper.to_rgba(continent_idx))
        # text = '{}/{}'.format(map_dict['continent_idx'][idx], map_dict['country_idx'][idx])
text = '{:d}'.format(country_idx)
plt.text(pt[0], pt[1], text, ha='center')
# cent = vg._region_centroid(region)
# plt.plot(cent[0], cent[1], 'r*')
plt.show()
| mit |
akmorrow13/endive | python/rank.py | 1 | 11438 | from score import *
def iter_grpd_submissions(submission_queue_id):
grpd_submissions = defaultdict(lambda: defaultdict(list))
for submission, status in syn.getSubmissionBundles(submission_queue_id):
# skip unscored submissions
if status['status'] != 'SCORED': continue
principalId = submission.teamId if 'teamId' in submission else submission.userId
creation_ts = parse(submission['createdOn'], fuzzy=True)
file_handles = json.loads(submission['entityBundleJSON'])['fileHandles']
assert len(file_handles) == 1
submission_fname = file_handles[0]['fileName']
if submission_fname == 'NOT_SET':
print "Skipping: %s" % submission_fname
continue
factor, sample = submission_fname.split('.')[1:3]
filename = "{}/{}.{}.{}".format(SUBMISSIONS_DIR, principalId, submission.id, submission_fname)
if not os.path.isfile(filename):
print "Skipping: %s" % filename
continue
grpd_submissions[
(factor, sample)][
principalId].append((creation_ts, filename))
for leader_board, factor_submissions in grpd_submissions.iteritems():
yield leader_board, factor_submissions
return
ScoreRecord = namedtuple(
'ScoreRecord',
'factor sample principalId submission_date submission_fname bootstrap_index recall_at_10_fdr recall_at_50_fdr auPRC auROC rank'
)
def score_record_factory(cursor, row):
row = list(row)
row[3] = parse(row[3], fuzzy=True)
return ScoreRecord(*row)
def calc_and_insert_new_results(
DB, factor, sample, principalId, submission_date, submission_fname):
# sort by submission date
print (factor, sample), principalId, submission_date, submission_fname
full_results, labels, scores = score_main(submission_fname)
print "FULL", full_results
all_res = []
for bootstrap_i, results in calc_bootstrapped_scores(labels, scores):
print bootstrap_i, results
all_res.append([
factor,
sample,
principalId,
submission_date,
submission_fname,
bootstrap_i,
results.recall_at_10_fdr,
results.recall_at_50_fdr,
results.auPRC,
results.auROC,
-1
])
while True:
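        # retry until the insert succeeds (sqlite raises OperationalError while
        # another worker holds the database lock)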
try:
conn = sqlite3.connect(DB)
c = conn.cursor()
for res in all_res:
c.execute(
"INSERT INTO scores VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);",
res
)
c.close()
conn.commit()
conn.close()
except sqlite3.OperationalError:
conn.close()
time.sleep(1)
continue
else:
break
return
def estimate_bootstrapped_scores(DB, submission_queue_id):
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute('''
CREATE TABLE IF NOT EXISTS scores (
factor text,
sample text,
principalId int,
submission_date text,
submission_fname text,
bootstrap_index int,
recall_at_10_fdr real,
recall_at_50_fdr real,
auPRC real,
auROC real,
rank int
);''')
c.close()
conn.commit()
conn.close()
submission_args = []
for (factor, sample), factor_submissions in iter_grpd_submissions(
submission_queue_id):
for principalId, submissions in factor_submissions.iteritems():
submissions.sort(reverse=True)
for (submission_index,
(submission_date, submission_fname)) in enumerate(submissions):
# skip old submissions
if submission_index > 0: continue
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute(
"SELECT * FROM scores WHERE factor=? AND sample=? AND principalId=? AND submission_date=?",
(
factor,
sample,
principalId,
submission_date
)
)
res = c.fetchall()
c.close()
conn.close()
if len(res) == 0:
submission_args.append([
DB,
factor, sample,
principalId, submission_date, submission_fname
])
run_in_parallel(NTHREADS, calc_and_insert_new_results, submission_args)
return
def calc_combined_ranks(records):
    # collect the (principalId, submission id) pairs for the records being ranked
principal_ids = [x.principalId for x in records]
submission_ids = [x.submission_fname.split(".")[1] for x in records]
attrs_to_rank = ['recall_at_10_fdr', 'recall_at_50_fdr', 'auPRC', 'auROC']
scores = np.zeros(len(principal_ids), dtype=float)
for user_i, attr in enumerate(attrs_to_rank):
attr_scores = np.array([getattr(x, attr) for x in records])
ranks = rankdata(-attr_scores, "average")
pval_scores = np.log(ranks/float(len(ranks) + 1))
scores += pval_scores
ranks = rankdata(scores, "average")
return dict(zip(zip(principal_ids, submission_ids), ranks))
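# Editor's sketch (not part of the challenge pipeline): a minimal, self-contained
# illustration of the rank-combination idea used in calc_combined_ranks. Each
# metric's ranks are mapped to log(rank / (n + 1)) and summed, so the final
# ordering rewards submissions that place well across all metrics. The metric
# values below are made up; `np` and `rankdata` are assumed to be available via
# the star import at the top of this module, as the surrounding code suggests.
def _rank_combination_sketch():
    aupr = np.array([0.60, 0.55, 0.40])    # hypothetical per-user auPRC values
    auroc = np.array([0.90, 0.92, 0.80])   # hypothetical per-user auROC values
    combined = (np.log(rankdata(-aupr, "average") / 4.0) +
                np.log(rankdata(-auroc, "average") / 4.0))
    # a lower combined score yields a better (smaller) final rank
    return rankdata(combined, "average")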
def filter_older_submissions(submissions):
"""Choose the most recent submission for each user.
"""
filtered_submissions = {}
for submission in submissions:
if (submission.principalId not in filtered_submissions
or (filtered_submissions[submission.principalId].submission_date
< submission.submission_date)
):
filtered_submissions[submission.principalId] = submission
return filtered_submissions.values()
def get_name(principalId):
try:
res = syn.restGET('/team/{id}'.format(id=principalId))
return res['name']
except:
profile = syn.getUserProfile(principalId)
return profile['userName']
GlobalScore = namedtuple('GlobalScore', ['principalId', 'name', 'score_lb', 'score_mean', 'score_ub', 'rank'])
def calculate_ranks_from_DB(DB):
conn = sqlite3.connect(DB)
conn.row_factory = score_record_factory
c = conn.cursor()
c.execute("SELECT * FROM scores ORDER BY bootstrap_index, principalId;")
sample_grpd_results = defaultdict(lambda: defaultdict(list))
all_users = set()
for x in c.fetchall():
sample_key = (x.sample, x.factor)
sample_grpd_results[(x.sample, x.factor)][x.bootstrap_index].append(x)
all_users.add(x.principalId)
# group all submissions by tf name and sample
rv = {}
global_scores = defaultdict(lambda: defaultdict(list))
for (tf_name, sample), bootstrapped_submissions in sample_grpd_results.iteritems():
user_ranks = defaultdict(list)
for index, submissions in bootstrapped_submissions.iteritems():
submissions = filter_older_submissions(submissions)
ranks = calc_combined_ranks(submissions)
obs_users = set(x[0] for x in ranks.keys())
for (principalId, submission_id), rank in ranks.iteritems():
user_ranks[(principalId, submission_id)].append(rank)
global_scores[index][principalId].append(
min(0.5, rank/(len(ranks)+1))
)
for principalId in all_users - obs_users:
global_scores[index][principalId].append(0.5)
print tf_name, sample
for (principalId, submission_id), ranks in sorted(
user_ranks.iteritems(), key=lambda x: sorted(x[1])[1]):
print principalId, get_name(principalId), submission_id, sorted(ranks)[1]
assert submission_id not in rv
rv[int(submission_id)] = sorted(ranks)[1]
print
# group the scores by user
user_grpd_global_scores = defaultdict(list)
user_grpd_global_ranks = defaultdict(list)
for bootstrap_index, bootstrap_global_scores in global_scores.iteritems():
sorted_scores = sorted(
bootstrap_global_scores.iteritems(), key=lambda x: sum(x[1]))
ranks = rankdata([sum(x[1]) for x in sorted_scores])
for (principalId, scores), rank in zip(sorted_scores, ranks):
user_grpd_global_scores[principalId].append(sum(scores)/float(len(scores)))
user_grpd_global_ranks[principalId].append(rank)
global_data = []
for principalId, scores in sorted(
user_grpd_global_scores.iteritems(), key=lambda x: sum(x[1])):
global_data.append(GlobalScore(*[
principalId, get_name(principalId),
min(scores), sum(scores)/len(scores), max(scores),
sorted(user_grpd_global_ranks[principalId])[1]
]))
global_data = sorted(global_data, key=lambda x: (x.rank, x.score_mean))
for x in global_data: print x
return rv, global_data
def update_global_scores_table(global_data):
import challenge_config as config
from synapseclient import Schema, Column, Table, Row, RowSet, as_table_columns
# 'principalId', 'name', 'score_lb', 'score_mean', 'score_ub', 'rank'
cols = [
Column(name='UserID', columnType='STRING', maximumSize=100),
Column(name='Name', columnType='STRING', maximumSize=100),
Column(name='score_lb', columnType='DOUBLE'),
Column(name='score_mean', columnType='DOUBLE'),
Column(name='score_ub', columnType='DOUBLE'),
Column(name='rank', columnType='DOUBLE'),
]
schema = Schema(name='Global Scores', columns=cols, parent=config.CHALLENGE_SYN_ID)
results = syn.tableQuery("select * from {}".format('syn7237020'))
if len(results) > 0:
a = syn.delete(results.asRowSet())
table = syn.store(Table(schema, global_data))
results = syn.tableQuery("select * from {}".format(table.tableId))
for row in results:
print row
return
def update_ranks(evaluation, dry_run=False):
if type(evaluation) != Evaluation:
evaluation = syn.getEvaluation(evaluation)
ranks, global_data = calculate_ranks_from_DB(DB)
for submission, status in syn.getSubmissionBundles(evaluation, status='SCORED'):
submission_id = int(submission['id'])
current_annotations = synapseclient.annotations.from_submission_status_annotations(
status["annotations"])
rank = ranks[submission_id] if submission_id in ranks else -1
print submission_id, rank
current_annotations['rank'] = rank
status.annotations = synapseclient.annotations.to_submission_status_annotations(
current_annotations, is_private=False)
status = syn.store(status)
# update the global data table
update_global_scores_table(global_data)
def calc_bootstrapped_scores(labels, scores):
from sklearn.cross_validation import StratifiedKFold
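    # Note (editorial): variability is estimated by re-scoring on each of the
    # ten stratified splits' index subsets, not by resampling with replacement.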
for i, (indices, _) in enumerate(
StratifiedKFold(labels, n_folds=10, random_state=0)):
results = ClassificationResult(
labels[indices], scores[indices].round(), scores[indices])
yield i, results
return
if __name__ == '__main__':
SUBMISSION_QUEUE_ID = 7071644
estimate_bootstrapped_scores(DB, SUBMISSION_QUEUE_ID)
update_ranks(SUBMISSION_QUEUE_ID)
| apache-2.0 |
0asa/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 30 | 7560 | """
Test the fastica algorithm.
"""
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
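# Editor's sketch (not part of the original test suite): illustrates the
# post-condition of center_and_norm -- roughly zero mean and unit variance
# along the chosen axis. The helper name and the demo data are made up.
def _center_and_norm_demo():
    rng = np.random.RandomState(42)
    demo = rng.randn(2, 500) * 3.0 + 5.0
    center_and_norm(demo)
    assert_array_almost_equal(demo.mean(axis=-1), np.zeros(2))
    assert_array_almost_equal(demo.std(axis=-1), np.ones(2))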
def test_gs():
"""
Test gram schmidt orthonormalization
"""
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
"""Test FastICA.fit_transform"""
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, 10]]:
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components, 10))
assert_equal(Xt.shape, (100, n_components))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
"""Test FastICA.inverse_transform"""
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 31 | 2633 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
"""Affinity Propagation algorithm """
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
"""Test AffinityPropagation.predict"""
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
"""Test exception in AffinityPropagation.predict"""
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
glouppe/scikit-learn | sklearn/tree/tree.py | 5 | 40442 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # unlike indexing with [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort == True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
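    # Hedged usage sketch (not in the original source): the indicator returned
    # above is a scipy CSR matrix, so the node ids visited by sample ``i`` can
    # be read from its sparsity pattern. The names ``clf``, ``X_train``,
    # ``y_train`` and ``X_test`` below are hypothetical.
    #
    #   clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
    #   indicator = clf.decision_path(X_test)
    #   nodes_of_sample_0 = indicator.indices[indicator.indptr[0]:indicator.indptr[1]]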
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
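# Hedged usage sketch (not in the original source): columns of the array
# returned by predict_proba follow the ordering of the ``classes_`` attribute.
# The iris data below is only used for illustration.
#
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
#   clf.predict_proba(iris.data[:3])       # shape (3, n_classes)
#   clf.predict_log_proba(iris.data[:3])   # element-wise log of the above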
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
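# Hedged usage sketch (not in the original source), illustrating the docstring
# warning that extra-trees are meant to be used inside an ensemble; the data
# names and the BaggingRegressor wrapper below are hypothetical.
#
#   from sklearn.ensemble import BaggingRegressor
#   base = ExtraTreeRegressor(random_state=0)
#   model = BaggingRegressor(base, n_estimators=10, random_state=0)
#   model.fit(X_train, y_train)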
| bsd-3-clause |
zoeyangyy/event-extraction | tf_test/lstm-mnist.py | 1 | 5654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
# Let GPU memory grow on demand
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# First load the data and take a look at its shape
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
lr = 1e-3
# During training and testing we want to use different batch sizes, so a
# placeholder is used here
batch_size = tf.placeholder(tf.int32, [])  # note: the dtype must be tf.int32
# batch_size = 128
# The input feature at each time step is 28-dimensional: one image row of 28 pixels per step
input_size = 28
# The sequence length is 28, i.e. each prediction needs 28 rows as input
timestep_size = 28
# Number of units in each hidden layer
hidden_size = 256
# Number of LSTM layers
layer_num = 2
# Number of output classes; for regression this would be 1
class_num = 10
_X = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, class_num])
keep_prob = tf.placeholder(tf.float32)
# Reshape the 784-pixel vector back into a 28 * 28 image
# The next few steps are the key to building the RNN / LSTM
####################################################################
# **Step 1: the RNN input shape = (batch_size, timestep_size, input_size)
X = tf.reshape(_X, [-1, 28, 28])
# **Step 2: define one LSTM cell; only hidden_size is needed, it automatically matches the dimension of the input X
# lstm_cell = rnn.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)
# **Step 3: add a dropout layer; usually only output_keep_prob is set
# lstm_cell = rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)
# **Step 4: call MultiRNNCell to build a multi-layer LSTM
# mlstm_cell = rnn.MultiRNNCell([lstm_cell] * layer_num, state_is_tuple=True)
stacked_rnn = []
for iiLyr in range(layer_num):
    lstm_cell = rnn.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)
    stacked_rnn.append(rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=1.0, output_keep_prob=keep_prob))
mlstm_cell = rnn.MultiRNNCell(cells=stacked_rnn, state_is_tuple=True)
# **Step 5: initialize the state with zeros
init_state = mlstm_cell.zero_state(batch_size, dtype=tf.float32)
# **Step 6, option one: call dynamic_rnn() to run the network we just built
# ** When time_major==False, outputs.shape = [batch_size, timestep_size, hidden_size]
# ** so h_state = outputs[:, -1, :] can be taken as the final output
# ** state.shape = [layer_num, 2, batch_size, hidden_size],
# ** or h_state = state[-1][1] can be taken as the final output
# ** either way the final output has shape [batch_size, hidden_size]
# outputs, state = tf.nn.dynamic_rnn(mlstm_cell, inputs=X, initial_state=init_state, time_major=False)
# h_state = outputs[:, -1, :]  # or h_state = state[-1][1]
# *************** To better understand how the LSTM works, we implement step 6 ourselves ***************
# Reading the docs you will find that every RNNCell provides a __call__() method (see appendix), which we can use to unroll the LSTM over the time steps.
# **Step 6, option two: unroll the computation over time steps
outputs = list()
state = init_state
with tf.variable_scope('RNN'):
for timestep in range(timestep_size):
if timestep > 0:
tf.get_variable_scope().reuse_variables()
        # state here holds the state of every LSTM layer
(cell_output, state) = mlstm_cell(X[:, timestep, :], state)
outputs.append(cell_output)
h_state = outputs[-1]
# The LSTM output above is a [hidden_size] tensor; for classification we still need a softmax layer on top
# First define the softmax weight matrix and bias
# out_W = tf.placeholder(tf.float32, [hidden_size, class_num], name='out_Weights')
# out_bias = tf.placeholder(tf.float32, [class_num], name='out_bias')
# Start training and testing
W = tf.Variable(tf.truncated_normal([hidden_size, class_num], stddev=0.1), dtype=tf.float32)
bias = tf.Variable(tf.constant(0.1,shape=[class_num]), dtype=tf.float32)
y_pre = tf.nn.softmax(tf.matmul(h_state, W) + bias)
# Loss and evaluation functions
cross_entropy = -tf.reduce_mean(y * tf.log(y_pre))
train_op = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.global_variables_initializer())
# for i in range(2000):
#     _batch_size = 128
#     batch_x, batch_y = mnist.train.next_batch(_batch_size)
#     if (i+1) % 20 == 0:
#         train_accuracy = sess.run(accuracy, feed_dict={
#             _X: batch_x, y: batch_y, keep_prob: 1.0, batch_size: _batch_size})
#         # Number of epochs already completed: mnist.train.epochs_completed
#         print("Iter%d, step %d, training accuracy %g" % (mnist.train.epochs_completed, (i+1), train_accuracy))
#     sess.run(train_op, feed_dict={_X: batch_x, y: batch_y, keep_prob: 0.5, batch_size: _batch_size})
# Compute the accuracy on the test data
# print("test accuracy %g" % sess.run(accuracy, feed_dict={
#     _X: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0, batch_size: mnist.test.images.shape[0]}))
X3 = mnist.train.images[4]
# img3 = X3.reshape([28, 28])
# plt.imshow(img3, cmap='gray')
# plt.show()
X3.shape = [-1, 784]
y_batch = mnist.train.labels[0]
y_batch.shape = [-1, class_num]
X3_outputs = np.array(sess.run(outputs, feed_dict={
    _X: X3, y: y_batch, keep_prob: 1.0, batch_size: 1}))
print(X3_outputs.shape)
X3_outputs.shape = [28, hidden_size]
print(X3_outputs[0])
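# Hedged addition (not in the original script): visualize how the hidden state
# evolves over the 28 time steps (image rows) for this single digit. It only
# uses variables already defined above (X3_outputs has shape [28, hidden_size]).
plt.figure(figsize=(8, 4))
plt.imshow(X3_outputs.T, aspect='auto')
plt.xlabel('time step (image row)')
plt.ylabel('hidden unit')
plt.title('LSTM hidden state over time for one digit')
plt.colorbar()
plt.show()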
| mit |
elkingtonmcb/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
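# Hedged numerical note (not in the original source): the SVD path above is the
# closed form w = V diag(s / (s**2 + alpha)) U^T y, which can be checked against
# the Cholesky path on a small dense problem, e.g.
#
#   rng = np.random.RandomState(0)
#   X_small, y_small = rng.randn(20, 5), rng.randn(20, 1)
#   w_svd = _solve_svd(X_small, y_small, np.array([1.0]))
#   w_chol = _solve_cholesky(X_small, y_small, np.array([1.0]))
#   np.allclose(w_svd, w_chol)   # expected to be True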
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
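# Hedged usage sketch (not in the original source): ridge_regression is the
# low-level entry point and returns coefficients only (no intercept handling).
# The data names below are hypothetical.
#
#   coef = ridge_regression(X_train, y_train, alpha=1.0, solver='cholesky')
#   coef, n_iter = ridge_regression(X_train, y_train, alpha=1.0,
#                                   solver='lsqr', return_n_iter=True)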
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
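# Hedged usage sketch (not in the original source): RidgeClassifier binarizes
# the labels to {-1, 1}, fits a (possibly multi-output) Ridge and predicts from
# the argmax of the decision function. The data names below are hypothetical.
#
#   clf = RidgeClassifier(alpha=1.0, class_weight='balanced')
#   clf.fit(X_train, y_train)
#   y_pred = clf.predict(X_test)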
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used, else :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
        'auto' : use svd if n_samples > n_features and X is not sparse,
            otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
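# Hedged usage sketch (not in the original source): with the default cv=None
# the efficient generalized (leave-one-out) cross-validation path is used and
# the selected penalty is exposed as ``alpha_``. Data names are hypothetical.
#
#   reg = RidgeCV(alphas=[0.1, 1.0, 10.0], store_cv_values=True)
#   reg.fit(X_train, y_train)
#   reg.alpha_             # selected penalty
#   reg.cv_values_.shape   # (n_samples, n_alphas)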
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
joshbohde/scikit-learn | benchmarks/bench_plot_ward.py | 2 | 1150 | """
Benchmark scikit-learn's Ward implementation against scipy's.
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import Ward
ward = Ward(n_clusters=15)
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time/scipy_time
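# A ratio above 1 means scikit-learn's Ward took longer than scipy's for that
# (n_samples, n_features) cell; the plot below shows log(ratio), so the
# contour drawn at ratio == 1 marks the parity line.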
pl.clf()
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
pombredanne/metamorphosys-desktop | metamorphosys/META/WebGME/samples/RollingWheel/OriginalDesktopProject/post_processing/common/post_processing_class.py | 1 | 28477 | # Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
import os
import json
import sys
import re
import numpy as np
from py_modelica.mat_file_functions.mat_file_to_dict import MatFile2Dict
import matplotlib.pyplot as plt
# Safety cap in case the limit-checking while-loop should ever fail to
# terminate (it should not be possible, but guard against it anyway).
MAX_ITERATIONS = 100000
class PostProcess:
    filter = []  # list of all variables/parameters to load from the mat-file
# (does not need to include 'time' - loaded by default)
time = None
result = None
def __init__(self, mat_file='', filter=None):
"""
Loads in mat-file, extracts given variables in filter (time always included)
and converts lists of values into numpy arrays.
These are stored in result as:
{{name1: array([values1])}, ..., {nameN: array([valuesN])}}
"""
mat_converter = MatFile2Dict(mat_file, filter, False)
result_lists = mat_converter.get_results()
# convert lists into numpy arrays
self.result = {}
for item in result_lists.iteritems():
self.result.update({item[0]: np.array(item[1])})
self.time = self.result['time']
def data_array(self, name):
"""
        Get the time-series of a variable as a numpy array.
        name - name of variable
        e.g. data_array('time') returns the time vector.
"""
return self.result[name]
def print_data(self, name):
"""
        Prints the time-series of a variable and returns it.
        name - name of variable
"""
data = self.data_array(name)
print 'name of data: '
print name
print 'here is the data: (with index)'
print '[',
for i in xrange(data.size - 1):
print str(i) + ':', str(data[i]) + ',',
print str(i + 1) + ':', str(data[i + 1]) + ']'
return data
def save_as_svg(self, name, metric_value, metric_name='metric_name', formula='', unit=''):
metric_array = np.ones(len(self.time)) * metric_value
plt.plot(self.time, self.data_array(name))
plt.plot(self.time, metric_array)
plt.plot()
plt.title('{0}\n{1}'.format(metric_name, formula))
plt.xlabel('time\n[s]')
if unit:
plt.ylabel('{0}\n[{1}]'.format(name, unit))
else:
plt.ylabel(name)
if not os.path.isdir('plots'):
os.mkdir('plots')
plot_path = os.path.join('plots', '{0}.svg'.format(metric_name))
plt.savefig(plot_path)
plt.close()
with open('testbench_manifest.json', 'r') as f_in:
sum_rep_json = json.load(f_in)
for metric_item in sum_rep_json['Metrics']:
if metric_item['Name'] == metric_name and 'VisualizationArtifacts' in metric_item:
metric_item['VisualizationArtifacts'].append(plot_path.replace(os.path.sep, '/'))
with open('testbench_manifest.json', 'wb') as f_out:
json.dump(sum_rep_json, f_out, indent=4)
return plot_path
def time_array(self):
"""
Get time-series of time in numpy array format.
"""
return self.time
def print_time(self):
"""
        Prints and returns the time-series of time.
"""
time = self.time
print 'here are time intervals:', time
return time
def short_array(self, name, start=0, end=-1):
"""
Get a truncated, from n1 to n2 array for variable name
name - name of variable
start - start index of interval
end - end index of interval
N.B index goes from 0 to len(array)-1
"""
return self.result[name][start:end]
def plot(self, name):
"""
Returns a tuple, suitable for plotting, of the variable's time-series together with time.
name - name of variable
"""
return self.data_array(name), self.time
def get_data_by_time(self, name, time_val):
"""
Get data based on time value.
name - name of variable to consider
time_val - time point where to extract the value
Returns the data and the index of the data
"""
i = 0
time = self.time
while time[i] < time_val and i in xrange(time.size - 1):
i += 1
data_arr = self.data_array(name)
if time[i - 1] != time_val:
cur = data_arr[i - 1]
next = data_arr[i]
data = time[i - 1] / ((time[i - 1] + time[i]) / 2) * (next - cur) + cur
else:
data = data_arr[i - 1]
return data, i
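    # A minimal alternative sketch (assumption: plain linear interpolation via
    # numpy is acceptable here); it is not called anywhere else in this module.
    def get_data_by_time_interp(self, name, time_val):
        """Same lookup as get_data_by_time using np.interp; returns the value only."""
        return np.interp(time_val, self.time, self.data_array(name))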
def get_data_by_index(self, name, index):
return self.data_array(name)[index]
def get_index_from_time(self, time_val):
"""
Get index based on time value.
time_val - time point where to extract the value
Returns index nearest to time_val
"""
i = 0
time = self.time
while time[i] < time_val and i in xrange(time.size-1):
i += 1
return i
def get_time(self, name, value, atol=1e-4, rtol=1e-4, start_index=0, end_index=-1):
"""
        Gets the first time point where the variable matches `value` to within
        either atol or rtol; if no such point exists, returns -1.
name - name of variable
atol - absolute tolerance
rtol - relative tolerance
"""
index = -1
# N.B. this is only one of many ways to do this
denominator = 1
if value > rtol:
denominator = value
data = self.data_array(name)[start_index:end_index]
cnt = 0
for x in data:
abs_diff = abs(x - value)
rel_diff = abs_diff / denominator
if abs_diff < atol or rel_diff < rtol:
index = cnt
break
else:
cnt += 1
if index >= 0:
return self.time[start_index + index]
return -1
def last_value(self, name):
"""
Get last value of variable
name - name of variable
"""
return self.data_array(name)[-1]
def global_max(self, name):
"""
Get maximum value of variable
name - name of variable
"""
return self.data_array(name).max()
def global_max_time(self, name):
"""
Get time where max occurs
name - name of variable
returns the time at where the max is
"""
index = self.data_array(name).argmax()
time_at_max = self.time[index]
return time_at_max
def global_min(self, name):
"""
Get minimum value of variable
name - name of variable
"""
return self.data_array(name).min()
def global_min_time(self, name):
"""
Get time where min occurs
name - name of variable
returns the time at where the min is
"""
index = self.data_array(name).argmin()
time_at_min = self.time[index]
return time_at_min
def global_abs_max(self, name):
"""
Get the maximum absolute value of variable
name - name of variable
"""
return np.absolute(self.data_array(name)).max()
def std_dev(self, name):
"""
Returns the standard deviation of variable
name - name of variable
"""
stddev = self.data_array(name).std()
return stddev
def variance(self, name):
"""
Returns the variance of variable
name - name of variable
"""
variance = self.data_array(name).var()
return variance
def sum_value(self, name):
"""
Returns the sum of the time-series for the variable
name - name of variable
"""
result = self.data_array(name).sum()
return result
def mean(self, name):
"""
Returns the mean of the time-series for the variable
name - name of variable
"""
result = np.mean(self.data_array(name), dtype=np.float64)
return result
def integrate(self, name):
"""
Returns the area under the curve of the time-series for the variable
name - name of variable
"""
time = self.time
data = self.data_array(name)
sum = 0
next = data[0]
next_t = time[0]
for i in xrange(data.size):
cur = next
next = data[i]
cur_t = next_t
next_t = time[i]
height = (next + cur) / 2
interval = next_t - cur_t
sum += height * interval
return sum
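    # Sketch of an equivalent one-liner using numpy's trapezoidal rule
    # (assumption: the hand-rolled loop above is intended to approximate the
    # same integral); kept separate so the original method stays untouched.
    def integrate_trapz(self, name):
        """Area under the curve of the time-series for the variable, via np.trapz."""
        return np.trapz(self.data_array(name), self.time)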
    def minima(self, name):
        """
        Returns the local minima of the time-series of variable
        name - name of variable
        """
        data = self.data_array(name)
        local_minima = []
        # Note: Python has no '++' operator, so the original 'data[++i]' just
        # re-read data[i]; scan the interior points instead and keep every
        # sample that is below its predecessor and not above its successor.
        for i in xrange(1, data.size - 1):
            if data[i] < data[i - 1] and data[i] <= data[i + 1]:
                local_minima.append(data[i])
        return np.array(local_minima)
    def maxima(self, name):
        """
        Returns the local maxima of the time-series of variable
        name - name of variable
        """
        data = self.data_array(name)
        local_maxima = []
        for i in xrange(1, data.size - 1):
            if data[i] >= data[i - 1] and data[i] > data[i + 1]:
                local_maxima.append(data[i])
        return np.array(local_maxima)
def pos_neg(self, name, tol=0.00000015):
"""
Returns time of the roots from positive to negative of time-series of variable
name - name of variable
tol - tolerance
"""
data = self.data_array(name)
time_arr = self.time
time = []
next = -1
for i in xrange(data.size):
cur = next
next = data[i]
if cur > 0 + tol and next <= 0 + tol:
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
                    time.append(cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t)
else:
time.append(time_arr[i - 1])
timing = np.array(time)
return timing
def neg_pos(self, name, tol=0.00000015):
"""
Returns time of the roots from negative to positive of time-series of variable
name - name of variable
tol - tolerance
"""
time = []
data = self.data_array(name)
time_arr = self.time
next = 1
for i in xrange(data.size):
cur = next
next = data[i]
if cur <= 0 + tol and next > 0 + tol:
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
time.append(cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t)
else:
time.append(time_arr[i - 1])
timing = np.array(time)
return timing
def to_zero(self, name, value_index):
"""
# time from a number to zero
# (use index from print_data() function)
# parameters: data array, time array, index of value
# returns the time of the zero
"""
data = self.data_array(name)
time_arr = self.time
i = value_index + 1
cur = data[value_index]
next = data[i]
tolerance = 0.00000015
if data[value_index] >= 0:
while next >= 0 + tolerance and i in xrange(data.size - 1):
i += 1
cur = next
next = data[i]
if next >= 0 + tolerance:
return -1
else:
while next <= 0 + tolerance and i in xrange(data.size - 1):
i += 1
cur = next
next = data[i]
if next <= 0 + tolerance:
return -1
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
time = cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t
else:
time = time_arr[i - 1]
return time
def from_zero(self, name, value_index):
"""
# time from a number to zero
# (use index from print_data() function)
# parameters: data array, time array, index of value
# returns the time of the zero
"""
data = self.data_array(name)
time_arr = self.time
i = value_index - 1
cur = data[value_index]
next = data[i]
tolerance = 0.00000015
if data[value_index - 1] >= 0:
while next >= 0 + tolerance and i in xrange(data.size):
i -= 1
cur = next
next = data[i]
if next >= 0 + tolerance:
return -1
else:
while next <= 0 + tolerance and i in xrange(data.size):
i -= 1
cur = next
next = data[i]
if next <= 0 + tolerance:
return -1
if cur != 0:
cur_t = time_arr[i + 1]
next_t = time_arr[i]
time = cur / ((cur + next) / 2) * (next_t - cur_t) + cur_t
else:
time = time_arr[i + 1]
return time
def zeros(self, name):
"""
        Find the zero crossings of the time-series for variable
        name - name of variable
        returns the crossing times, grouped as
        [positive-to-negative, negative-to-positive]
        """
        # pos_neg/neg_pos expect the variable name (they load the data
        # themselves), not raw arrays.
        data = [[], []]
        data[0].append(self.pos_neg(name))
        data[1].append(self.neg_pos(name))
        return np.array(data)
def compare(self, name1, name2):
"""
Compare the time-series of two variables
name1 - name of variable 1
name2 - name of variable 2
returns true if the results are identical
"""
data1 = self.data_array(name1)
data2 = self.data_array(name2)
for i in xrange(data1.size):
if data1[i] != data2[i]:
return False
return True
def time_total(self, val1, val2):
# finding the difference between 2 times
time = abs(val2 - val1)
return time
def delta_t(self, start_index, end_index):
"""
Returns the length of the time-interval between to indices
"""
t1 = self.time[start_index]
t2 = self.time[end_index]
dt = t2 - t1
return dt
def get_local_max(self, name, start_index, end_index):
"""
Returns the value of the maximum between two indices
N.B. including both points
:param name:
:param start_index:
:param end_index:
"""
if end_index == -1:
maximum = self.data_array(name)[start_index:].max()
else:
maximum = self.data_array(name)[start_index:end_index + 1].max()
return maximum
def get_local_min(self, name, start_index, end_index):
"""
Returns the value of the minimum between two indices
N.B. including both points
"""
if end_index == -1:
minimum = self.data_array(name)[start_index:].min()
else:
minimum = self.data_array(name)[start_index:end_index + 1].min()
return minimum
def find_first_max_violation(self, name, value, start_index=0):
"""
Starting from start_index it looks for the first index where the
time-series has a value greater than value.
If it never occurs, it returns -1
"""
time_series = self.data_array(name)[start_index:]
n = len(time_series)
for i in range(n):
if time_series[i] > value:
return i + start_index
return -1
def find_first_min_violation(self, name, value, start_index=0):
"""
Starting from start_index it looks for the first index where the
time-series has a value less than value.
If it never occurs, it returns -1
"""
time_series = self.data_array(name)[start_index:]
n = len(time_series)
for i in range(n):
if time_series[i] < value:
return i + start_index
return -1
def check_max_limit(self, name, value):
actual_value = ''
limit_exceeded = False
start_index = 0
global_max = -np.Inf
cnt = 0
print 'check_max_limit'
while start_index > -1:
index = self.find_first_max_violation(name, value, start_index)
if index > -1:
end_index = self.find_first_min_violation(name, value, index)
d_t = self.delta_t(index, end_index)
                print 'Found violation at t={0} lasting : {1} s'.format(self.time[index], d_t)
if d_t > 0.5:
limit_exceeded = True
local_max = self.get_local_max(name, index, end_index)
print 'Local maximum : {0}'.format(local_max)
if local_max > global_max:
global_max = local_max
start_index = end_index
else:
break
cnt += 1
if cnt == MAX_ITERATIONS:
print 'Limit checking for variable {0} aborted after {1} iterations' \
.format(name, MAX_ITERATIONS)
sys.exit(1)
if limit_exceeded:
actual_value = global_max
return limit_exceeded, actual_value
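    # Worked illustration of the check above (comment only, numbers are
    # hypothetical): with value=10 and a signal exceeding 10 from t=2.0 s to
    # t=2.7 s, find_first_max_violation locates the start, find_first_min_violation
    # the end, delta_t gives 0.7 s > 0.5 s, so the limit counts as exceeded and
    # the local maximum inside that window is reported as the actual value.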
def check_min_limit(self, name, value):
actual_value = ''
limit_exceeded = False
start_index = 0
global_min = np.Inf
cnt = 0
print 'check_min_limit'
while start_index > -1:
index = self.find_first_min_violation(name, value, start_index)
if index > -1:
end_index = self.find_first_max_violation(name, value, index)
d_t = self.delta_t(index, end_index)
print 'Found violation at t={0} lasting : {1} s'.format(self.time[index], d_t)
if d_t > 0.5:
limit_exceeded = True
local_min = self.get_local_min(name, index, end_index)
print 'Local minimum : {0}'.format(local_min)
if local_min < global_min:
global_min = local_min
start_index = end_index
else:
break
cnt += 1
if cnt == MAX_ITERATIONS:
print 'Limit checking for variable {0} aborted after {1} iterations' \
.format(name, MAX_ITERATIONS)
sys.exit(1)
if limit_exceeded:
actual_value = global_min
return limit_exceeded, actual_value
def update_metrics_in_report_json(metrics, report_file='testbench_manifest.json'):
"""
    Update the metric values in the report file.
    :param metrics: dictionary of the form
        {'name_of_metric': {'value': (int) or (float), 'unit': ""}, ...}
    :param report_file: path to the testbench manifest json file
"""
if not os.path.exists(report_file):
raise IOError('Report file does not exits : {0}'.format(report_file))
# read current summary report, which contains the metrics
with open(report_file, 'r') as file_in:
result_json = json.load(file_in)
assert isinstance(result_json, dict)
if 'Metrics' in result_json:
for metric in result_json['Metrics']:
if 'Name' in metric and 'Value' in metric:
if metric['Name'] in metrics.keys():
new_value = metrics[metric['Name']]['value']
new_unit = metrics[metric['Name']]['unit']
if new_unit is not None:
metric['Unit'] = new_unit
if new_value is not None:
metric['Value'] = str(new_value)
else:
pass
else:
print 'Metric item : {0} does not have right format'.format(metric)
pass
# update json file with the new values
with open(report_file, 'wb') as file_out:
json.dump(result_json, file_out, indent=4)
else:
print 'Report file {0} does not have any Metrics defined..'
pass
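# Hedged usage sketch for update_metrics_in_report_json (comment only; the
# metric name is hypothetical):
#
#   metrics = {'MaxSpeed': {'value': 12.3, 'unit': 'm/s'}}
#   update_metrics_in_report_json(metrics, report_file='testbench_manifest.json')
#
# Only metrics whose 'Name' already exists in the manifest are updated.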
def read_limits():
"""
Reads in limits and modifies the ModelicaUri to the correct one.
Returns:
- the updated limit_dict
- the filter as a list
"""
with open('limits.json', 'r') as f_in:
limit_dict = json.load(f_in)
# use set to avoid checking for duplicates
filter = set()
for limit_item in limit_dict['LimitChecks']:
# drop first part of VariableFullPath update the limit_item
# once the limit.json is generated correctly these two lines can be dropped
# modelica_uri = '.'.join(.split('.')[1:])
# modelica_model_rel_uri = limit_item['VariableName']
# split_full_path = limit_item['LimitFullPath'].split('/')
# modelica_model = split_full_path[-2]
# cyphy_relative_uri = '{0}.{1}'.format(modelica_model, modelica_model_rel_uri)
# modelica_uri = modelica_uri.replace(modelica_model_rel_uri, cyphy_relative_uri)
# limit_item['VariableFullPath'] = modelica_uri
# limit_item['ComponentInstanceName'] = split_full_path[-3]
# filter out this variable in the .mat-file
filter.add(limit_item['VariableFullPath'])
# Code specific for FANG-I, with no defined VariableName from GME
# limit_var_name = limit_item['VariableName']
# limit_var_name = re.sub('\.u(.*)$', '', limit_item['VariableFullPath'])
# limit_var_name_split = limit_var_name.split('.')
# limit_var_name = limit_var_name_split[len(limit_var_name_split)-3] + '=>' + \
# limit_var_name_split[len(limit_var_name_split)-1]
# limit_item['LimitName'] = limit_var_name
filter = list(filter)
print "Variables for limit-checking : {0}".format(filter)
return limit_dict, filter
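# Hedged sketch of the limits.json shape read_limits expects (comment only;
# the variable path shown is hypothetical):
#
#   {"LimitChecks": [
#       {"VariableFullPath": "wheel.hub.flange.tau", "Type": "max", "Value": 100.0}
#   ]}
#
# 'Type' is "min", "max", or anything else, in which case both +Value and
# -Value bounds are checked (see check_limits_and_add_to_report_json below).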
def check_limits_and_add_to_report_json(pp, limit_dict):
"""
Check the limits and write out dictionary to testbench_manifest.json
"""
assert isinstance(pp, PostProcess)
for limit_item in limit_dict['LimitChecks']:
modelica_uri = limit_item['VariableFullPath']
limit_value = limit_item['Value']
limit_type = limit_item['Type']
print "--== {0} ==--".format(modelica_uri)
print "Type of Limit : {0}".format(limit_type)
print "Limit : {0} ".format(limit_value)
if limit_type == 'min':
limit_exceeded, actual_value = pp.check_min_limit(modelica_uri, limit_value)
limit_item['LimitExceeded'] = limit_exceeded
limit_item['ActualValue'] = str(actual_value)
elif limit_type == 'max':
limit_exceeded, actual_value = pp.check_max_limit(modelica_uri, limit_value)
limit_item['LimitExceeded'] = limit_exceeded
limit_item['ActualValue'] = str(actual_value)
else:
limit_exceeded_max, actual_max_value = pp.check_max_limit(modelica_uri, limit_value)
limit_exceeded_min, actual_min_value = pp.check_min_limit(modelica_uri, -limit_value)
# determine the actual value depending on which limits were exceeded
if limit_exceeded_max and limit_exceeded_min:
if actual_max_value > abs(actual_min_value):
actual_value = str(actual_max_value)
else:
actual_value = str(abs(actual_min_value))
elif limit_exceeded_max:
actual_value = str(actual_max_value)
elif limit_exceeded_min:
actual_value = str(abs(actual_min_value))
else:
actual_value = ''
limit_item['LimitExceeded'] = limit_exceeded_max or limit_exceeded_min
limit_item['ActualValue'] = actual_value
limit_item['Value'] = str(limit_value)
print "Violation : {0}".format(limit_item["LimitExceeded"])
with open('testbench_manifest.json', 'r') as f_in:
sum_rep_json = json.load(f_in)
sum_rep_json['LimitChecks'] = limit_dict['LimitChecks']
with open('testbench_manifest.json', 'wb') as f_out:
json.dump(sum_rep_json, f_out, indent=4)
print "Limits updated"
| mit |
apapiu/airbnb_app | airbnb/web_app/flaskexample/views.py | 1 | 8183 | import os
import sys
import pandas as pd
import numpy as np
import folium
from sklearn.externals import joblib
from sklearn.neighbors import NearestNeighbors
from flask import render_template
from flask import request
#local module:
from flaskexample import app
from flaskexample import airbnb_pipeline
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
from bokeh.charts import Histogram
from bokeh.embed import components
home_folder = os.environ["home_folder"]
dbname = os.environ["dbname"]
username = os.environ["username"]
if sys.platform == "linux":
password = os.environ["password"]
if sys.platform == "linux":
connect_str = "dbname='%s' user='%s' host='localhost' password='%s'"%(dbname,username,password)
con = psycopg2.connect(connect_str)
else:
con = psycopg2.connect(database = dbname, user = username)
train = pd.read_sql_query("SELECT * FROM location_descriptions", con)
train["id"] = train["id"].astype("float").astype("int")
listings = pd.read_sql_query(
"""
SELECT id, price, diff, neighbourhood_cleansed,listing_url,
name, summary, preds, medium_url, city, room_type FROM listings_price
""", con)
train = train.merge(listings)
#visualaizing preds.
# %matplotlib inline
# import seaborn as sns
# #doing the ratio thing:
# temp = train[train["room_type"] == "Private room"][train["price"] < 200][["preds", "price"]]
# #%config InlineBackend.figure_format = 'retina'
#
#
# temp["ratio"] = (temp["preds"] - temp["price"])/temp["price"]
# temp["good"] = 0
# temp["good"][temp["ratio"] < 0] = 1
# temp["good"][temp["ratio"] > 0][temp["ratio"] < 0.6] = 2
# temp.loc[(temp["ratio"] > 0) & (temp["ratio"] < 0.6), "good"] = 2
# sns.lmplot(x = "price", y = "preds", data = temp, hue = "good", fit_reg = False, palette = "Dark2")
nbd_counts = train["neighbourhood_cleansed"].value_counts()
descp = train[["id", "neighborhood_overview"]]
descp = descp.drop_duplicates()
nbds = list(nbd_counts[:40].index)
print("loading models")
model = joblib.load(os.path.join(home_folder, 'airbnb_app/Data/tf_idf_model.pkl'))
knn = NearestNeighbors(500, metric = "cosine", algorithm = "brute")
X = descp["neighborhood_overview"]
#somewhat slow - could save the projections first here:
X_proj = model.transform(X)
#fast since there's no real fittting going on here
#should check how fast predicting is - should be fine for a few values.
knn.fit(X_proj)
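# Hedged sketch of how a free-text query flows through the two models above
# (comment only so nothing runs at import time; the query string is made up
# and assumes the pickled model exposes a scikit-learn style transform):
#
#   query_vec = model.transform(["quiet tree-lined streets"])
#   distances, indices = knn.kneighbors(query_vec)
#   # indices point back into descp, i.e. into the listing descriptions.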
#for debugging:
# descp = "hip trendy cool"
# descp_2 = "gritty urban"
# descp_3 = "dangerous"
# map_osm = get_heat_map(str(descp), knn, model, train)
# map_osm = add_heat_layer(map_osm, descp_2,knn, model, train, scale=scale_2)
# map_osm = add_heat_layer(map_osm, descp_3,knn, model, train, scale = scale_3)
# folium.LayerControl().add_to(map_osm)
# map_osm.save(outfile='map_test.html')
#~~~~~~~~~~
#Map Views:
#~~~~~~~~~~
@app.route('/')
@app.route('/home')
def map_input():
return render_template("home.html", nbds = nbds)
@app.route('/map')
def return_map():
descp = request.args.get('descp')
descp_2 = None
descp_3 = None
try:
descp_2 = request.args.get('map_descp_2')
descp_3 = request.args.get('map_descp_3')
except:
pass
map_osm = airbnb_pipeline.get_heat_map(str(descp), knn, model, train)
    if descp_2 and descp_2 != "":
map_osm = airbnb_pipeline.add_heat_layer(map_osm, descp_2,knn,
model, train, scale = 1)
    if descp_3 and descp_3 != "":
map_osm = airbnb_pipeline.add_heat_layer(map_osm, descp_3,knn,
model, train, scale = 2)
folium.LayerControl().add_to(map_osm)
#TODO: find a way to add custom html on top of map
#maybe add the nbd scores over it.
#TODO:make this serve the map directly?
#this will save the map and then reload it
#sounds like it could be slow but it's actually very fast.
map_osm.save(outfile='flaskexample/templates/map.html')
return render_template("map.html")
#return render_template("rec_temp.html")
#~~~~~~~~~~~~~~~~~~~
#Neighborhood Views:
#~~~~~~~~~~~~~~~~~~~
@app.route('/nbd')
def nbd():
#pull 'nbd' from input field and store it:
nbd = request.args.get('nbd')
room_type = "Private room"
#nbd = "East Village"
train = pd.read_sql_query("""
SELECT * FROM listings_price
WHERE neighbourhood_cleansed = %(nbd)s
AND room_type = %(room_type)s;
""",
con, index_col = "id",
params = {"nbd":nbd, "room_type":room_type})
#train
train["ratio"] = (train["preds"] - train["price"])/train["price"]
#keep train for the histogram
sm_train = train[train["ratio"] > 0][train["ratio"] < 1.2]
sm_train = sm_train.sort_values("diff", ascending = False)
sm_train["diff"]
births = []
#showing some tables:
for i in range(0,25):
births.append(dict(price=int(sm_train.iloc[i]['price']),
city=sm_train.iloc[i]['name'],
id = sm_train.index[i],
room_type=int(sm_train.iloc[i]['preds']),
url=sm_train.iloc[i]['listing_url']))
the_result = ''
#births
plot = Histogram(train["price"], bins = 20, plot_width=500, plot_height=300)
script, div = components(plot)
title = "Distribution of daily prices in {0}".format(nbd)
median = train["price"].median()
percentile_05 = np.round(train["price"].quantile(0.05), 1)
percentile_95 = np.round(train["price"].quantile(0.95), 1)
more_info = "The median price per day is ${0}. 95% of the listings are in \
between ${1} and ${2}".format(median, percentile_05, percentile_95)
return render_template('nbd.html', births = births, the_result = the_result, nbd = nbd,
script = script, div = div, title = title, more_info = more_info)
@app.route('/nbd_rec')
def nbd_rec():
descp = request.args.get('descp')
#map:
map_osm = airbnb_pipeline.get_heat_map(str(descp), knn, model, train)
map_osm.save(outfile='flaskexample/templates/map.html')
#scores:
nbd_score = airbnb_pipeline.get_nbds(descp, knn = knn,
model = model, train = train, nbd_counts = nbd_counts)
nbd_score = (nbd_score["weighted_score"].replace(np.inf, np.nan).dropna().
sort_values(ascending = False).head(10))
nbd_score = np.sqrt(np.sqrt(np.sqrt(nbd_score/np.max(nbd_score))))*95
nbd_score_list = []
for i in range(10):
nbd_score_list.append(dict(name = nbd_score.index[i], score = int(nbd_score.iloc[i])))
return render_template('nbd_rec.html', nbds = nbd_score_list, descp = descp)
@app.route('/listing')
def listing():
listing_id = int(request.args.get('listing_id'))
#listing_id = 685006
#one_listing = listings.iloc[0]
one_listing = listings[listings["id"] == listing_id].iloc[0]
title = one_listing["name"]
summary = one_listing["summary"]
text = ("The predicted daily price for this listing is {0}$ which is {1}$ from the actual price {2}$"
.format(int(one_listing.preds),
int(one_listing["diff"]),
int(one_listing.price)))
photo_link = one_listing["medium_url"]
plot = airbnb_pipeline.get_price_plot(one_listing = one_listing, std = 35)
script, div = components(plot)
return render_template('listing_view.html', script = script, div = div, summary = summary,
text = text, link = one_listing.listing_url, title = title, photo_link = photo_link)
@app.route('/description')
def description():
return render_template('description_2.html')
@app.route('/about_me')
def about_me():
return render_template('about_me.html')
| apache-2.0 |
pnedunuri/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/backends/backend_pdf.py | 6 | 95758 | # -*- coding: utf-8 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <[email protected]>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import codecs
import os
import re
import struct
import sys
import time
import warnings
import zlib
from io import BytesIO
import numpy as np
from six import unichr
from datetime import datetime
from math import ceil, cos, floor, pi, sin
import matplotlib
from matplotlib import __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (RendererBase, GraphicsContextBase,
FigureManagerBase, FigureCanvasBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import (Bunch, is_string_like, get_realpath_and_stat,
is_writable_file_like, maxdict)
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font, get_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import (FIXED_WIDTH, ITALIC, LOAD_NO_SCALE,
LOAD_NO_HINTING, KERNING_UNFITTED)
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib import _path
from matplotlib import _png
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
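# Minimal illustration of fill() (comment only; the byte strings are arbitrary):
#
#   fill([b'one', b'two', b'three'], linelen=8)
#   # -> b'one two\nthree'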
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()':
return b'\\' + m
elif m == b'\n':
return br'\n'
elif m == b'\r':
return br'\r'
assert False
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, (float, np.floating)):
if not np.isfinite(obj):
raise ValueError("Can only output finite numbers in PDF")
r = ("%.10f" % obj).encode('ascii')
return r.rstrip(b'0').rstrip(b'.')
# Booleans. Needs to be tested before integers since
# isinstance(True, int) is true.
elif isinstance(obj, bool):
return [b'false', b'true'][obj]
# Integers are written as such.
elif isinstance(obj, (six.integer_types, np.integer)):
return ("%d" % obj).encode('ascii')
# Unicode strings are encoded in UTF-16BE with byte-order mark.
elif isinstance(obj, six.text_type):
try:
# But maybe it's really ASCII?
s = obj.encode('ASCII')
return pdfRepr(s)
except UnicodeEncodeError:
s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
return pdfRepr(s)
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif isinstance(obj, bytes):
return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = [b"<<"]
r.extend([Name(key).pdfRepr() + b" " + pdfRepr(val)
for key, val in six.iteritems(obj)])
r.append(b">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = [b"["]
r.extend([pdfRepr(val) for val in obj])
r.append(b"]")
return fill(r)
# The null keyword.
elif obj is None:
return b'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
msg = "Don't know a PDF representation for %s objects." % type(obj)
raise TypeError(msg)
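# Minimal illustration of pdfRepr (comment only):
#
#   pdfRepr(True)   -> b'true'
#   pdfRepr(1.5)    -> b'1.5'      (trailing zeros and dot stripped)
#   pdfRepr([1, 2]) -> b'[ 1 2 ]'  (whitespace inserted by fill())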
class Reference(object):
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return ("%d 0 R" % self.id).encode('ascii')
def write(self, contents, file):
write = file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
write(pdfRepr(contents))
write(b"\nendobj\n")
class Name(object):
"""PDF name object."""
__slots__ = ('name',)
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
if isinstance(name, bytes):
name = name.decode('ascii')
self.name = self._regex.sub(Name.hexify, name).encode('ascii')
def __repr__(self):
return "<Name %s>" % self.name
def __str__(self):
return '/' + six.text_type(self.name)
@staticmethod
def hexify(match):
return '#%02x' % ord(match.group())
def pdfRepr(self):
return b'/' + self.name
class Operator(object):
"""PDF operator object."""
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
class Verbatim(object):
"""Store verbatim PDF command content for later inclusion in the
stream."""
def __init__(self, x):
self._x = x
def pdfRepr(self):
return self._x
# PDF operators (not an exhaustive list)
_pdfops = dict(
close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
Op = Bunch(**dict([(name, Operator(value))
for name, value in six.iteritems(_pdfops)]))
def _paint_path(fill, stroke):
"""Return the PDF operator to paint a path in the following way:
fill: fill the path with the fill color
stroke: stroke the outline of the path with the line color"""
if stroke:
if fill:
return Op.fill_stroke
else:
return Op.stroke
else:
if fill:
return Op.fill
else:
return Op.endpath
Op.paint_path = _paint_path
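# Illustration (comment only): Op.paint_path(True, False) yields Op.fill,
# Op.paint_path(True, True) yields Op.fill_stroke, and Op.paint_path(False,
# False) yields Op.endpath, i.e. the path is consumed without painting.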
class Stream(object):
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
__slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
def __init__(self, id, len, file, extra=None, png=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header; png: if the data is already
png compressed, the decode parameters"""
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None:
self.extra = dict()
else:
self.extra = extra.copy()
if png is not None:
self.extra.update({'Filter': Name('FlateDecode'),
'DecodeParms': png})
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression'] and not png:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = BytesIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write(b"\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write(b"\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write(b"\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename):
self.nextObject = 1 # next free object id
self.xrefTable = [[0, 65535, 'the zero object']]
self.passed_in_file_object = False
self.original_file_like = None
self.tell_base = 0
if is_string_like(filename):
fh = open(filename, 'wb')
elif is_writable_file_like(filename):
try:
self.tell_base = filename.tell()
except IOError:
fh = BytesIO()
self.original_file_like = filename
else:
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self._core14fontdir = os.path.join(
rcParams['datapath'], 'fonts', 'pdfcorefonts')
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write(b"%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.pagesObject = self.reserveObject('pages')
self.pageList = []
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.gouraudObject = self.reserveObject('Gouraud triangles')
self.XObjectObject = self.reserveObject('external objects')
self.resourceObject = self.reserveObject('resources')
root = {'Type': Name('Catalog'),
'Pages': self.pagesObject}
self.writeObject(self.rootObject, root)
revision = ''
self.infoDict = {
'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
'Producer': 'matplotlib pdf backend%s' % revision,
'CreationDate': datetime.today()
}
self.fontNames = {} # maps filenames to internal font names
self.nextFont = 1 # next free internal font name
self.dviFontInfo = {} # information on dvi fonts
# differently encoded Type-1 fonts may share the same descriptor
self.type1Descriptors = {}
self.used_characters = {}
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.gouraudTriangles = []
self._images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
self.paths = []
self.pageAnnotations = [] # A list of annotations for the
# current page
# The PDF spec recommends to include every procset
procsets = [Name(x)
for x in "PDF Text ImageB ImageC ImageI".split()]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = {'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'Shading': self.gouraudObject,
'ProcSet': procsets}
self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
self.endStream()
self.width, self.height = width, height
contentObject = self.reserveObject('page contents')
thePage = {'Type': Name('Page'),
'Parent': self.pagesObject,
'Resources': self.resourceObject,
'MediaBox': [0, 0, 72 * width, 72 * height],
'Contents': contentObject,
'Group': {'Type': Name('Group'),
'S': Name('Transparency'),
'CS': Name('DeviceRGB')},
'Annots': self.pageAnnotations,
}
pageObject = self.reserveObject('page')
self.writeObject(pageObject, thePage)
self.pageList.append(pageObject)
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
# Initialize the pdf graphics state to match the default mpl
# graphics context: currently only the join style needs to be set
self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
# Clear the list of annotations for the next page
self.pageAnnotations = []
def newTextnote(self, text, positionRect=[-100, -100, 0, 0]):
# Create a new annotation of type text
theNote = {'Type': Name('Annot'),
'Subtype': Name('Text'),
'Contents': text,
'Rect': positionRect,
}
annotObject = self.reserveObject('annotation')
self.writeObject(annotObject, theNote)
self.pageAnnotations.append(annotObject)
def close(self):
self.endStream()
# Write out the various deferred objects
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in six.itervalues(self.alphaStates)]))
self.writeHatches()
self.writeGouraudTriangles()
xobjects = dict(x[1:] for x in six.itervalues(self._images))
for tup in six.itervalues(self.markers):
xobjects[tup[0]] = tup[1]
for name, value in six.iteritems(self.multi_byte_charprocs):
xobjects[name] = value
for name, path, trans, ob, join, cap, padding, filled, stroked \
in self.paths:
xobjects[name] = ob
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writePathCollectionTemplates()
self.writeObject(self.pagesObject,
{'Type': Name('Pages'),
'Kids': self.pageList,
'Count': len(self.pageList)})
self.writeInfoDict()
# Finalize the file
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
elif self.original_file_like is not None:
self.original_file_like.write(self.fh.getvalue())
self.fh.close()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill([pdfRepr(x) for x in data]))
self.write(b'\n')
def beginStream(self, id, len, extra=None, png=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra, png)
def endStream(self):
if self.currentstream is not None:
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename (or dvi name) of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(
fontprop, fontext='afm', directory=self._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm', directory=self._core14fontdir)
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
matplotlib.verbose.report(
'Assigning font %s = %r' % (Fx, filename),
'debug')
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in six.iteritems(self.fontNames):
matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
if filename.endswith('.afm'):
# from pdf.use14corefonts
matplotlib.verbose.report('Writing AFM font', 'debug')
fonts[Fx] = self._write_afm_font(filename)
elif filename in self.dviFontInfo:
# a Type 1 font from a dvi file;
# the filename is really the TeX name
matplotlib.verbose.report('Writing Type-1 font', 'debug')
fonts[Fx] = self.embedTeXFont(filename,
self.dviFontInfo[filename])
else:
# a normal TrueType font
matplotlib.verbose.report('Writing TrueType font', 'debug')
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fonts[Fx] = self.embedTTF(realpath, chars[1])
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
with open(filename, 'rb') as fh:
font = AFM(fh)
fontname = font.get_fontname()
fontdict = {'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding')}
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedTeXFont(self, texname, fontinfo):
msg = ('Embedding TeX font ' + texname + ' - fontinfo=' +
repr(fontinfo.__dict__))
matplotlib.verbose.report(msg, 'debug')
# Widths
widthsObject = self.reserveObject('font widths')
self.writeObject(widthsObject, fontinfo.dvifont.widths)
# Font dictionary
fontdictObject = self.reserveObject('font dictionary')
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'FirstChar': 0,
'LastChar': len(fontinfo.dvifont.widths) - 1,
'Widths': widthsObject,
}
# Encoding (if needed)
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [Name(ch) for ch in enc]
differencesArray = [0] + differencesArray
fontdict['Encoding'] = \
{'Type': Name('Encoding'),
'Differences': differencesArray}
# If no file is specified, stop short
if fontinfo.fontfile is None:
msg = ('Because of TeX configuration (pdftex.map, see updmap '
'option pdftexDownloadBase14) the font {0} is not '
'embedded. This is deprecated as of PDF 1.5 and it may '
'cause the consumer application to show something that '
'was not intended.').format(fontinfo.basefont)
warnings.warn(msg)
fontdict['BaseFont'] = Name(fontinfo.basefont)
self.writeObject(fontdictObject, fontdict)
return fontdictObject
# We have a font file to embed - read it in and apply any effects
t1font = type1font.Type1Font(fontinfo.fontfile)
if fontinfo.effects:
t1font = t1font.transform(fontinfo.effects)
fontdict['BaseFont'] = Name(t1font.prop['FontName'])
# Font descriptors may be shared between differently encoded
# Type-1 fonts, so only create a new descriptor if there is no
# existing descriptor for this font.
effects = (fontinfo.effects.get('slant', 0.0),
fontinfo.effects.get('extend', 1.0))
fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
if fontdesc is None:
fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
fontdict['FontDescriptor'] = fontdesc
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
# Create and write the font descriptor and the font file
# of a Type-1 font
fontdescObject = self.reserveObject('font descriptor')
fontfileObject = self.reserveObject('font file')
italic_angle = t1font.prop['ItalicAngle']
fixed_pitch = t1font.prop['isFixedPitch']
flags = 0
# fixed width
if fixed_pitch:
flags |= 1 << 0
# TODO: serif
if 0:
flags |= 1 << 1
# TODO: symbolic (most TeX fonts are)
if 1:
flags |= 1 << 2
# non-symbolic
else:
flags |= 1 << 5
# italic
if italic_angle:
flags |= 1 << 6
# TODO: all caps
if 0:
flags |= 1 << 16
# TODO: small caps
if 0:
flags |= 1 << 17
# TODO: force bold
if 0:
flags |= 1 << 18
ft2font = get_font(fontfile)
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(t1font.prop['FontName']),
'Flags': flags,
'FontBBox': ft2font.bbox,
'ItalicAngle': italic_angle,
'Ascent': ft2font.ascender,
'Descent': ft2font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': t1font.prop['FamilyName'],
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
# 'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdescObject, descriptor)
self.beginStream(fontfileObject.id, None,
{'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0})
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = get_font(filename)
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest:
return np.round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0:
return floor(value)
else:
return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type': Name('Font'),
'BaseFont': ps_name,
'FirstChar': firstchar,
'LastChar': lastchar,
'FontDescriptor': fontdescObject,
'Subtype': Name('Type3'),
'Name': descriptor['FontName'],
'FontBBox': bbox,
'FontMatrix': [.001, 0, 0, .001, 0, 0],
'CharProcs': charprocsObject,
'Encoding': {
'Type': Name('Encoding'),
'Differences': differencesArray},
'Widths': widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed
# to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
s = decode_char(charcode)
width = font.load_char(
s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [get_char_width(charcode)
for charcode in range(firstchar, lastchar+1)]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = font.get_char_index(ccode)
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(
filename.encode(sys.getfilesystemencoding()), glyph_ids)
charprocs = {}
for charname, stream in six.iteritems(rawcharprocs):
charprocDict = {'Length': len(stream)}
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find(b"d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type': Name('Font'),
'Subtype': Name('CIDFontType2'),
'BaseFont': ps_name,
'CIDSystemInfo': {
'Registry': 'Adobe',
'Ordering': 'Identity',
'Supplement': 0},
'FontDescriptor': fontdescObject,
'W': wObject,
'CIDToGIDMap': cidToGidMapObject
}
type0FontDict = {
'Type': Name('Font'),
'Subtype': Name('Type0'),
'BaseFont': ps_name,
'Encoding': Name('Identity-H'),
'DescendantFonts': [cidFontDictObject],
'ToUnicode': toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
with open(filename, 'rb') as fontfile:
length1 = 0
while True:
data = fontfile.read(4096)
if not data:
break
length1 += len(data)
self.currentstream.write(data)
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = ['\u0000'] * 65536
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = font.get_char_index(ccode)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange))).encode('ascii')
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
                             {'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman') # Macintosh scheme
except KeyError:
# Microsoft scheme:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
# (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH:
flags |= 1 << 0
if 0: # TODO: serif
flags |= 1 << 1
if symbolic:
flags |= 1 << 2
else:
flags |= 1 << 5
if sf & ITALIC:
flags |= 1 << 6
if 0: # TODO: all caps
flags |= 1 << 16
if 0: # TODO: small caps
flags |= 1 << 17
if 0: # TODO: force bold
flags |= 1 << 18
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': ps_name,
'Flags': flags,
'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
'Ascent': cvt(font.ascender, nearest=False),
'Descent': cvt(font.descender, nearest=False),
'CapHeight': cvt(pclt['capHeight'], nearest=False),
'XHeight': cvt(pclt['xHeight']),
'ItalicAngle': post['italicAngle'][1], # ???
'StemV': 0 # ???
}
# The font subsetting to a Type 3 font does not work for
        # OpenType (.otf) fonts that embed a Postscript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
msg = ("'%s' can not be subsetted into a Type 3 font. "
"The entire font will be embedded in the output.")
warnings.warn(msg % os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, {'Type': Name('ExtGState'),
'CA': alpha[0], 'ca': alpha[1]})
return name
def hatchPattern(self, hatch_style):
# The colors may come in as numpy arrays, which aren't hashable
if hatch_style is not None:
edge, face, hatch = hatch_style
if edge is not None:
edge = tuple(edge)
if face is not None:
face = tuple(face)
hatch_style = (edge, face, hatch)
pattern = self.hatchPatterns.get(hatch_style, None)
if pattern is not None:
return pattern
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[hatch_style] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 72.0
for hatch_style, name in six.iteritems(self.hatchPatterns):
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = {'Procsets':
[Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
self.beginStream(
ob.id, None,
{'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res,
# Change origin to match Agg at top-left.
'Matrix': [1, 0, 0, 1, 0, self.height * 72]})
stroke_rgb, fill_rgb, path = hatch_style
self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
Op.setrgb_stroke)
if fill_rgb is not None:
self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
self.output(rcParams['hatch.linewidth'], Op.setlinewidth)
self.output(*self.pathOperations(
Path.hatch(path),
Affine2D().scale(sidelen),
simplify=False))
self.output(Op.fill_stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
name = Name('GT%d' % len(self.gouraudTriangles))
self.gouraudTriangles.append((name, points, colors))
return name
def writeGouraudTriangles(self):
gouraudDict = dict()
for name, points, colors in self.gouraudTriangles:
ob = self.reserveObject('Gouraud triangle')
gouraudDict[name] = ob
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
self.beginStream(
ob.id, None,
{'ShadingType': 4,
'BitsPerCoordinate': 32,
'BitsPerComponent': 8,
'BitsPerFlag': 8,
'ColorSpace': Name('DeviceRGB'),
'AntiAlias': True,
'Decode': [points_min[0], points_max[0],
points_min[1], points_max[1],
0, 1, 0, 1, 0, 1]
})
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[(str('flags'), str('u1')),
(str('points'), str('>u4'), (2,)),
(str('colors'), str('u1'), (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
self.write(streamarr.tostring())
self.endStream()
self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
entry = self._images.get(id(image), None)
if entry is not None:
return entry[1]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self._images[id(image)] = (image, name, ob)
return name
def _unpack(self, im):
"""
Unpack the image object im into height, width, data, alpha,
where data and alpha are HxWx3 (RGB) or HxWx1 (grayscale or alpha)
arrays, except alpha is None if the image is fully opaque.
"""
h, w = im.shape[:2]
im = im[::-1]
if im.ndim == 2:
return h, w, im, None
else:
rgb = im[:, :, :3]
rgb = np.array(rgb, order='C')
# PDF needs a separate alpha image
if im.shape[2] == 4:
alpha = im[:, :, 3][..., None]
if np.all(alpha == 255):
alpha = None
else:
alpha = np.array(alpha, order='C')
else:
alpha = None
return h, w, rgb, alpha
def _writePng(self, data):
"""
Write the image *data* into the pdf file using png
predictors with Flate compression.
"""
buffer = BytesIO()
_png.write_png(data, buffer)
buffer.seek(8)
written = 0
header = bytearray(8)
while True:
n = buffer.readinto(header)
assert n == 8
length, type = struct.unpack(b'!L4s', bytes(header))
if type == b'IDAT':
data = bytearray(length)
n = buffer.readinto(data)
assert n == length
self.currentstream.write(bytes(data))
written += n
elif type == b'IEND':
break
else:
buffer.seek(length, 1)
buffer.seek(4, 1) # skip CRC
def _writeImg(self, data, height, width, grayscale, id, smask=None):
"""
Write the image *data* of size *height* x *width*, as grayscale
if *grayscale* is true and RGB otherwise, as pdf object *id*
and with the soft mask (alpha channel) *smask*, which should be
either None or a *height* x *width* x 1 array.
"""
obj = {'Type': Name('XObject'),
'Subtype': Name('Image'),
'Width': width,
'Height': height,
'ColorSpace': Name('DeviceGray' if grayscale
else 'DeviceRGB'),
'BitsPerComponent': 8}
if smask:
obj['SMask'] = smask
if rcParams['pdf.compression']:
png = {'Predictor': 10,
'Colors': 1 if grayscale else 3,
'Columns': width}
else:
png = None
self.beginStream(
id,
self.reserveObject('length of image stream'),
obj,
png=png
)
if png:
self._writePng(data)
else:
self.currentstream.write(data.tostring())
self.endStream()
def writeImages(self):
for img, name, ob in six.itervalues(self._images):
height, width, data, adata = self._unpack(img)
if adata is not None:
smaskObject = self.reserveObject("smask")
self._writeImg(adata, height, width, True, smaskObject.id)
else:
smaskObject = None
self._writeImg(data, height, width, False,
ob.id, smaskObject)
def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
capstyle):
"""Return name of a marker XObject representing the given path."""
# self.markers used by markerObject, writeMarkers, close:
# mapping from (path operations, fill?, stroke?) to
# [name, object reference, bounding box, linewidth]
# This enables different draw_markers calls to share the XObject
# if the gc is sufficiently similar: colors etc can vary, but
# the choices of whether to fill and whether to stroke cannot.
# We need a bounding box enclosing all of the XObject path,
# but since line width may vary, we store the maximum of all
# occurring line widths in self.markers.
# close() is somewhat tightly coupled in that it expects the
# first two components of each value in self.markers to be the
# name and object reference.
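        # Illustrative sketch of one entry (all values hypothetical):
        #   key:   (pathops_tuple, True, False, 'miter', 'butt')
        #   value: [Name('M3'), <Reference 57>, Bbox(...), 1.5]
        # A later draw_markers call that produces the same key reuses
        # 'M3'; only the stored maximum linewidth may be bumped.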
pathops = self.pathOperations(path, trans, simplify=False)
key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
bbox = path.get_extents(trans)
self.markers[key] = [name, ob, bbox, lw]
else:
if result[-1] < lw:
result[-1] = lw
name = result[0]
return name
def writeMarkers(self):
for ((pathops, fill, stroke, joinstyle, capstyle),
(name, ob, bbox, lw)) in six.iteritems(self.markers):
bbox = bbox.padded(lw * 0.5)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents)})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(fill, stroke))
self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
name = Name('P%d' % len(self.paths))
ob = self.reserveObject('path %d' % len(self.paths))
self.paths.append(
(name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
padding, filled, stroked))
return name
def writePathCollectionTemplates(self):
for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
stroked) in self.paths:
pathops = self.pathOperations(path, trans, simplify=False)
bbox = path.get_extents(trans)
if not np.all(np.isfinite(bbox.extents)):
extents = [0, 0, 0, 0]
else:
bbox = bbox.padded(padding)
extents = list(bbox.extents)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': extents})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(filled, stroked))
self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
return [Verbatim(_path.convert_to_string(
path, transform, clip, simplify, sketch,
6,
[Op.moveto.op, Op.lineto.op, b'', Op.curveto.op, Op.closepath.op],
True))]
def writePath(self, path, transform, clip=False, sketch=None):
if clip:
clip = (0.0, 0.0, self.width * 72, self.height * 72)
simplify = path.should_simplify
else:
clip = None
simplify = False
cmds = self.pathOperations(path, transform, clip, simplify=simplify,
sketch=sketch)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell() - self.tell_base
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell() - self.tell_base
self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print('No offset for object %d (%s)' % (i, name),
file=sys.stderr)
borken = True
else:
if name == 'the zero object':
key = "f"
else:
key = "n"
text = "%010d %05d %s \n" % (offset, generation, key)
self.write(text.encode('ascii'))
i += 1
if borken:
raise AssertionError('Indirect object does not exist')
def writeInfoDict(self):
"""Write out the info dictionary, checking it for good form"""
def is_date(x):
return isinstance(x, datetime)
check_trapped = (lambda x: isinstance(x, Name) and
x.name in ('True', 'False', 'Unknown'))
keywords = {'Title': is_string_like,
'Author': is_string_like,
'Subject': is_string_like,
'Keywords': is_string_like,
'Creator': is_string_like,
'Producer': is_string_like,
'CreationDate': is_date,
'ModDate': is_date,
'Trapped': check_trapped}
for k in six.iterkeys(self.infoDict):
if k not in keywords:
warnings.warn('Unknown infodict keyword: %s' % k)
else:
if not keywords[k](self.infoDict[k]):
warnings.warn('Bad value for infodict keyword %s' % k)
self.infoObject = self.reserveObject('info')
self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write(b"trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject}))
# Could add 'ID'
self.write(("\nstartxref\n%d\n%%%%EOF\n" %
self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi, height, width):
RendererBase.__init__(self)
self.height = height
self.width = width
self.file = file
self.gc = self.new_gc()
self.mathtext_parser = MathTextParser("Pdf")
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
gc._fillcolor = fillcolor
orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
if gc._forced_alpha:
gc._effective_alphas = (gc._alpha, gc._alpha)
elif fillcolor is None or len(fillcolor) < 4:
gc._effective_alphas = (gc._rgb[3], 1.0)
else:
gc._effective_alphas = (gc._rgb[3], fillcolor[3])
delta = self.gc.delta(gc)
if delta:
self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
gc._effective_alphas = orig_alphas
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, six.string_types):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def option_scale_image(self):
"""
        The pdf backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
return whether to generate a composite image from multiple images on
a set of axes
"""
return not rcParams['image.composite_image']
def draw_image(self, gc, x, y, im, transform=None):
h, w = im.shape[:2]
if w == 0 or h == 0:
return
if transform is None:
# If there's no transform, alpha has already been applied
gc.set_alpha(1.0)
self.check_gc(gc)
w = 72.0 * w / self.image_dpi
h = 72.0 * h / self.image_dpi
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
self.file.output(Op.gsave,
1, 0, 0, 1, x, y, Op.concat_matrix,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(np.asarray(linewidths) == 0.0):
stroked = False
elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is len_path * uses_per_path
# cost of XObject is len_path + 5 for the definition,
# uses_per_path for the uses
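        # Worked example (illustrative numbers): with len_path = 10 and
        # uses_per_path = 4, in-line emission costs about 10 * 4 = 40,
        # while the XObject route costs about 10 + 5 + 4 = 19, so the
        # XObject form wins; with uses_per_path = 1 in-line is cheaper.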
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
if (not can_do_optimization) or (not should_do_optimization):
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# Same logic as in draw_path_collection
len_marker_path = len(marker_path)
uses = len(path)
if len_marker_path * uses < len_marker_path + uses + 5:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fill = gc.fill(rgbFace)
stroke = gc.stroke()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fill, stroke, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.file.width*72, self.file.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
if (x < 0 or y < 0 or
x > self.file.width * 72 or y > self.file.height * 72):
continue
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name = self.file.addGouraudTriangles(tpoints, colors)
self.check_gc(gc)
self.file.output(name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output(cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype),
Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, 72)
page = next(iter(dvi))
dvi.close()
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one one-character string, but later it may have longer
# strings interspersed with kern amounts.
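        # For instance (hypothetical values), after the loop below seq
        # might look like:
        #   [['font', Name('F1'), 9.96],
        #    ['text', 100.0, 80.0, [b'H'], 107.2],
        #    ['text', 107.2, 80.0, [b'i'], 110.1]]
        # and the merging pass further down joins the two text entries
        # into one string, since they share a y coordinate and abut.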
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.fontName(dvifont.texname)
if dvifont.texname not in self.file.dviFontInfo:
psfont = self.tex_font_mapping(dvifont.texname)
self.file.dviFontInfo[dvifont.texname] = Bunch(
fontfile=psfont.filename,
basefont=psfont.psname,
encodingfile=psfont.encoding,
effects=psfont.effects,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
# We need to convert the glyph numbers to bytes, and the easiest
# way to do this on both Python 2 and 3 is .encode('latin-1')
seq += [['text', x1, y1,
[six.unichr(glyph).encode('latin-1')], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform_point((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0, 0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
        # chunk with a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
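        # For example (illustrative only): with a Type 3 font the string
        # u'x \u2192 y' is split by check_simple_method below into
        # [(1, ['x', ' ']), (2, ['\u2192']), (1, [' ', 'y'])], so the two
        # 1-byte runs are shown with Tj and the arrow is drawn via Do.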
self.check_gc(gc, gc._rgb)
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
            to output this text, and chunk the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = six.text_type(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1 and
chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype), Op.show,
Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, 0, oldx, 0, 0)
self.file.output(self.encode_string(chunk, fonttype),
Op.show)
oldx = newx
lastgind = None
for c in chunk:
ccode = ord(c)
gind = font.get_char_index(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode,
flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale / 1000
h *= scale / 1000
d *= scale / 1000
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(
prop, fontext='afm', directory=self.file._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm',
directory=self.file._core14fontdir)
font = self.afm_font_cache.get(filename)
if font is None:
with open(filename, 'rb') as fh:
font = AFM(fh)
self.afm_font_cache[filename] = font
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
filename = findfont(prop)
font = get_font(filename)
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width * 72.0, self.file.height * 72.0
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self._effective_alphas = (1.0, 1.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return repr(d)
def stroke(self):
"""
Predicate: does the path need to be stroked (its outline drawn)?
This tests for the various conditions that disable stroking
the path, in which case it would presumably be filled.
"""
# _linewidth > 0: in pdf a line of width 0 is drawn at minimum
# possible device width, but e.g., agg doesn't draw at all
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def fill(self, *args):
"""
Predicate: does the path need to be filled?
An optional argument can be used to specify an alternative
_fillcolor, as needed by RendererPdf.draw_markers.
"""
if len(args):
_fillcolor = args[0]
else:
_fillcolor = self._fillcolor
return (self._hatch or
(_fillcolor is not None and
(len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
def paint(self):
"""
Return the appropriate pdf operator to cause the path to be
stroked, filled, or both.
"""
return Op.paint_path(self.fill(), self.stroke())
capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha, forced, effective_alphas):
name = self.file.alphaState(effective_alphas)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch_style = (self._hatch_color, self._fillcolor, hatch)
name = self.file.hatchPattern(hatch_style)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while ((self._cliprect, self._clippath) != (cliprect, clippath)
and self.parent is not None):
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if ((self._cliprect, self._clippath) != (cliprect, clippath) or
self.parent is None):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
path, affine = clippath.get_transformed_path_and_affine()
cmds.extend(
PdfFile.pathOperations(path, affine, simplify=False) +
[Op.clip, Op.endpath])
return cmds
commands = (
# must come first since may pop
(('_cliprect', '_clippath'), clip_cmd),
(('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
fill_performed = False
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
if (ours is None or theirs is None):
different = bool(not(ours is theirs))
else:
different = bool(ours != theirs)
except ValueError:
ours = np.asarray(ours)
theirs = np.asarray(theirs)
different = (ours.shape != theirs.shape or
np.any(ours != theirs))
if different:
break
# Need to update hatching if we also updated fillcolor
if params == ('_hatch',) and fill_performed:
different = True
if different:
if params == ('_fillcolor',):
fill_performed = True
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
fillcolor = getattr(other, '_fillcolor', self._fillcolor)
effective_alphas = getattr(other, '_effective_alphas',
self._effective_alphas)
self._fillcolor = fillcolor
self._effective_alphas = effective_alphas
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g., backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPdf(figure)
manager = FigureManagerPdf(canvas, num)
return manager
class PdfPages(object):
"""
A multi-page PDF file.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Initialize:
>>> with PdfPages('foo.pdf') as pdf:
... # As many times as you like, create a figure fig and save it:
... fig = plt.figure()
... pdf.savefig(fig)
... # When no figure is specified the current figure is saved
... pdf.savefig()
Notes
-----
In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
forgetting the format argument.
"""
__slots__ = ('_file', 'keep_empty')
def __init__(self, filename, keep_empty=True):
"""
Create a new PdfPages object.
Parameters
----------
filename: str
Plots using :meth:`PdfPages.savefig` will be written to a file at
this location. The file is opened at once and any older file with
the same name is overwritten.
keep_empty: bool, optional
If set to False, then empty pdf files will be deleted automatically
when closed.
"""
self._file = PdfFile(filename)
self.keep_empty = keep_empty
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, making the underlying file a complete
PDF file.
"""
self._file.close()
if (self.get_pagecount() == 0 and not self.keep_empty and
not self._file.passed_in_file_object):
os.remove(self._file.fh.name)
self._file = None
def infodict(self):
"""
Return a modifiable information dictionary object
(see PDF reference section 10.2.1 'Document Information
Dictionary').
"""
return self._file.infoDict
def savefig(self, figure=None, **kwargs):
"""
Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
Any other keyword arguments are passed to
:meth:`~matplotlib.figure.Figure.savefig`.
Parameters
----------
figure: :class:`~matplotlib.figure.Figure` or int, optional
Specifies what figure is saved to file. If not specified, the
active figure is saved. If a :class:`~matplotlib.figure.Figure`
instance is provided, this figure is saved. If an int is specified,
the figure instance to save is looked up by number.
"""
if isinstance(figure, Figure):
figure.savefig(self, format='pdf', **kwargs)
else:
if figure is None:
figureManager = Gcf.get_active()
else:
figureManager = Gcf.get_fig_manager(figure)
if figureManager is None:
raise ValueError("No such figure: " + repr(figure))
else:
figureManager.canvas.figure.savefig(self, format='pdf',
**kwargs)
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return len(self._file.pageList)
def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
"""
Add a new text note to the page to be saved next. The optional
positionRect specifies the position of the new note on the
        page. By default it is outside the page to make sure it is
invisible on printouts.
"""
self._file.newTextnote(text, positionRect)
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
fixed_dpi = 72
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(72) # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._file
else:
file = PdfFile(filename)
try:
file.newPage(width, height)
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(
self.figure, width, height, image_dpi,
RendererPdf(file, image_dpi, height, width),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureCanvas = FigureCanvasPdf
FigureManager = FigureManagerPdf
| gpl-3.0 |
kevinthesun/mxnet | python/mxnet/model.py | 17 | 39894 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
    from sklearn.base import BaseEstimator
    BASE_ESTIMATOR = BaseEstimator
    SKLEARN_INSTALLED = True
except ImportError:
    SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
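    Examples
    --------
    A minimal sketch (illustrative only; ``arg_params`` is assumed to be a
    dict of parameter name to ``NDArray``):
    >>> kv, update_on_kvstore = _create_kvstore('local', 2, arg_params)
    >>> # update_on_kvstore can come back False for very large parameters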
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
            # fake an index here, to make the optimizer create a distinct
            # state for the same index but on different devices; TODO(mli):
            # use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
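    For example (illustrative only; ``cb_a``, ``cb_b`` and
    ``batch_end_params`` are hypothetical):
    >>> _multiple_callbacks(None, batch_end_params)          # no-op
    >>> _multiple_callbacks(cb_a, batch_end_params)          # single call
    >>> _multiple_callbacks([cb_a, cb_b], batch_end_params)  # calls both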
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
    - This function will update the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
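# Illustrative usage sketch added for clarity; it is not part of the original
# module. It assumes a trained network whose symbol and parameter dicts are
# already in memory; the prefix 'my-model' and epoch 10 are hypothetical.
def _example_save_checkpoint(symbol, arg_params, aux_params):
    """Write my-model-symbol.json and my-model-0010.params (sketch only)."""
    save_checkpoint('my-model', 10, symbol, arg_params, aux_params)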
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
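# Illustrative usage sketch added for clarity; it is not part of the original
# module. It reloads the files written by save_checkpoint above; the prefix
# and epoch number are hypothetical placeholders.
def _example_load_checkpoint():
    loaded_sym, loaded_args, loaded_auxs = load_checkpoint('my-model', 10)
    # The returned tuple can be passed straight to FeedForward (defined below).
    return loaded_sym, loaded_args, loaded_auxs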
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, the number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol
        to be passed in ``aux_params`` and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
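    # Usage sketch (added for illustration; not part of the original API):
    #   probs = model.predict(val_iter)                       # network outputs
    #   probs, data, label = model.predict(val_iter, return_data=True)
    # where `model` is a fitted FeedForward instance and `val_iter` is any
    # DataIter; both names are hypothetical placeholders.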
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric or str
            The metric for calculating the score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
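    # Usage sketch (added for illustration; not part of the original API):
    #   val_acc = model.score(val_iter, eval_metric='acc')
    # `model` and `val_iter` are hypothetical placeholders; the call returns
    # the final value of the chosen metric over the iterator as a float.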
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
            The evaluation metric. This could be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often there is no need to change it for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optmizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
            if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
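    # Usage sketch (added for illustration; not part of the original API):
    #   model = FeedForward(softmax_symbol, ctx=cpu(), num_epoch=10)
    #   model.fit(X=train_iter, eval_data=val_iter, eval_metric='acc')
    # `softmax_symbol`, `train_iter` and `val_iter` are hypothetical
    # placeholders for a network symbol and training/validation DataIters.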
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
        You can also use `pickle` to do the job if you only work in Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
            Epoch number of the model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - Symbol will be loaded from ``prefix-symbol.json``.
        - Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
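# End-to-end usage sketch added for illustration; it is not part of the
# original module. All names (softmax_symbol, train_iter, val_iter, the
# 'example' prefix) are hypothetical placeholders the caller would supply.
def _example_feedforward_workflow(softmax_symbol, train_iter, val_iter):
    """Train, evaluate, checkpoint and reload a model (sketch only)."""
    model = FeedForward.create(softmax_symbol, X=train_iter, eval_data=val_iter,
                               ctx=cpu(), num_epoch=2, optimizer='sgd')
    accuracy = model.score(val_iter)   # final value of the 'acc' metric
    model.save('example')              # writes example-symbol.json / example-0002.params
    reloaded = FeedForward.load('example', epoch=2)
    return accuracy, reloaded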
| apache-2.0 |
sensarliar/paparazzi | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
utm.east = i * dist_points+ utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
david-ragazzi/nupic | examples/opf/tools/testDiagnostics.py | 58 | 1606 | import numpy as np
def printMatrix(inputs, spOutput):
''' (i,j)th cell of the diff matrix will have the number of inputs for which the input and output
pattern differ by i bits and the cells activated differ at j places.
Parameters:
--------------------------------------------------------------------
inputs: the input encodings
spOutput: the coincidences activated in response to each input
'''
from pylab import matplotlib as mat
w=len(np.nonzero(inputs[0])[0])
numActive=len(np.nonzero(spOutput[0])[0])
matrix = np.zeros([2*w+1,2*numActive+1])
for x in xrange(len(inputs)):
i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]
j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]
for p, q in zip(i,j):
matrix[p,q]+=1
for y in xrange(len(matrix)) :
matrix[y]=[max(10*x, 100) if (x<100 and x>0) else x for x in matrix[y]]
cdict = {'red':((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),\
'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),\
'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}
my_cmap = mat.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pyl=mat.pyplot
pyl.matshow(matrix, cmap = my_cmap)
pyl.colorbar()
pyl.ylabel('Number of bits by which the inputs differ')
pyl.xlabel('Number of cells by which input and output differ')
pyl.title('The difference matrix')
pyl.show()
def _hammingDistance(s1, s2):
"""Hamming distance between two numpy arrays s1 and s2"""
return sum(abs(s1-s2))
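# Tiny illustrative check added for clarity; not part of the original module.
# Two binary vectors that differ in two positions have Hamming distance 2.
def _example_hamming_distance():
    return _hammingDistance(np.array([1, 0, 1, 0]), np.array([1, 1, 0, 0]))  # -> 2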
| gpl-3.0 |
wschenck/nest-simulator | pynest/examples/urbanczik_synapse_example.py | 6 | 12225 | # -*- coding: utf-8 -*-
#
# urbanczik_synapse_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Weight adaptation according to the Urbanczik-Senn plasticity
------------------------------------------------------------
This script demonstrates the learning in a compartmental neuron where the
dendritic synapses adapt their weight according to the plasticity rule by
Urbanczik and Senn [1]. In this simple setup, a spike pattern of 200 Poisson
spike trains is repeatedly presented to a neuron that is composed of one
somatic and one dendritic compartment. At the same time, the somatic
conductances are activated to produce a time-varying matching potential.
After the learning, this signal is then reproduced by the membrane
potential of the neuron. This script produces Fig. 1B in [1] but uses standard
units instead of the unitless quantities used in the paper.
[1] R. Urbanczik, W. Senn (2014): Learning by the Dendritic Prediction of
Somatic Spiking. Neuron, 81, 521-528.
'''
import numpy as np
from matplotlib import pyplot as plt
import nest
def g_inh(amplitude, t_start, t_end):
'''
returns weights for the spike generator that drives the inhibitory
somatic conductance.
'''
return lambda t: np.piecewise(t, [(t >= t_start) & (t < t_end)],
[amplitude, 0.0])
def g_exc(amplitude, freq, offset, t_start, t_end):
'''
returns weights for the spike generator that drives the excitatory
somatic conductance.
'''
return lambda t: np.piecewise(t, [(t >= t_start) & (t < t_end)],
[lambda t: amplitude*np.sin(freq*t) + offset, 0.0])
def matching_potential(g_E, g_I, nrn_params):
'''
returns the matching potential as a function of the somatic conductances.
'''
E_E = nrn_params['soma']['E_ex']
E_I = nrn_params['soma']['E_in']
return (g_E*E_E + g_I*E_I) / (g_E + g_I)
def V_w_star(V_w, nrn_params):
'''
returns the dendritic prediction of the somatic membrane potential.
'''
g_D = nrn_params['g_sp']
g_L = nrn_params['soma']['g_L']
E_L = nrn_params['soma']['E_L']
return (g_L*E_L + g_D*V_w) / (g_L + g_D)
def phi(U, nrn_params):
'''
rate function of the soma
'''
phi_max = nrn_params['phi_max']
k = nrn_params['rate_slope']
beta = nrn_params['beta']
theta = nrn_params['theta']
return phi_max / (1.0 + k*np.exp(beta*(theta - U)))
def h(U, nrn_params):
'''
derivative of the rate function phi
'''
phi_max = nrn_params['phi_max']
k = nrn_params['rate_slope']
beta = nrn_params['beta']
theta = nrn_params['theta']
return 15.0*beta / (1.0 + np.exp(-beta*(theta - U)) / k)
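# Illustrative sanity check added for clarity; not part of the original script.
# With the parameter dictionary defined below, phi() is half-maximal exactly at
# the threshold theta, i.e. phi_max / (1 + rate_slope).
def _example_rate_at_threshold(params):
    """Return the somatic rate at U = theta (sketch only)."""
    return phi(params['theta'], params)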
'''
simulation params
'''
n_pattern_rep = 100 # number of repetitions of the spike pattern
pattern_duration = 200.0
t_start = 2.0*pattern_duration
t_end = n_pattern_rep*pattern_duration + t_start
simulation_time = t_end + 2.0*pattern_duration
n_rep_total = int(np.around(simulation_time / pattern_duration))
resolution = 0.1
nest.SetKernelStatus({'resolution': resolution})
'''
neuron parameters
'''
nrn_model = 'pp_cond_exp_mc_urbanczik'
nrn_params = {
't_ref': 3.0, # refractory period
'g_sp': 600.0, # soma-to-dendritic coupling conductance
'soma': {
'V_m': -70.0, # initial value of V_m
'C_m': 300.0, # capacitance of membrane
'E_L': -70.0, # resting potential
'g_L': 30.0, # somatic leak conductance
'E_ex': 0.0, # resting potential for exc input
'E_in': -75.0, # resting potential for inh input
'tau_syn_ex': 3.0, # time constant of exc conductance
'tau_syn_in': 3.0, # time constant of inh conductance
},
'dendritic': {
'V_m': -70.0, # initial value of V_m
'C_m': 300.0, # capacitance of membrane
'E_L': -70.0, # resting potential
'g_L': 30.0, # dendritic leak conductance
'tau_syn_ex': 3.0, # time constant of exc input current
'tau_syn_in': 3.0, # time constant of inh input current
},
# parameters of rate function
'phi_max': 0.15, # max rate
'rate_slope': 0.5, # called 'k' in the paper
'beta': 1.0 / 3.0,
'theta': -55.0,
}
'''
synapse params
'''
syns = nest.GetDefaults(nrn_model)['receptor_types']
init_w = 0.3*nrn_params['dendritic']['C_m']
syn_params = {
'synapse_model': 'urbanczik_synapse_wr',
'receptor_type': syns['dendritic_exc'],
'tau_Delta': 100.0, # time constant of low pass filtering of the weight change
'eta': 0.17, # learning rate
'weight': init_w,
'Wmax': 4.5*nrn_params['dendritic']['C_m'],
'delay': resolution,
}
'''
# in case you want to use the unitless quantities as in [1]:
# neuron params:
nrn_model = 'pp_cond_exp_mc_urbanczik'
nrn_params = {
't_ref': 3.0,
'g_sp': 2.0,
'soma': {
'V_m': 0.0,
'C_m': 1.0,
'E_L': 0.0,
'g_L': 0.1,
'E_ex': 14.0 / 3.0,
'E_in': -1.0 / 3.0,
'tau_syn_ex': 3.0,
'tau_syn_in': 3.0,
},
'dendritic': {
'V_m': 0.0,
'C_m': 1.0,
'E_L': 0.0,
'g_L': 0.1,
'tau_syn_ex': 3.0,
'tau_syn_in': 3.0,
},
# parameters of rate function
'phi_max': 0.15,
'rate_slope': 0.5,
'beta': 5.0,
'theta': 1.0,
}
# synapse params:
syns = nest.GetDefaults(nrn_model)['receptor_types']
init_w = 0.2*nrn_params['dendritic']['g_L']
syn_params = {
'synapse_model': 'urbanczik_synapse_wr',
'receptor_type': syns['dendritic_exc'],
'tau_Delta': 100.0,
'eta': 0.0003 / (15.0*15.0*nrn_params['dendritic']['C_m']),
'weight': init_w,
'Wmax': 3.0*nrn_params['dendritic']['g_L'],
'delay': resolution,
}
'''
'''
somatic input
'''
ampl_exc = 0.016*nrn_params['dendritic']['C_m']
offset = 0.018*nrn_params['dendritic']['C_m']
ampl_inh = 0.06*nrn_params['dendritic']['C_m']
freq = 2.0 / pattern_duration
soma_exc_inp = g_exc(ampl_exc, 2.0*np.pi*freq, offset, t_start, t_end)
soma_inh_inp = g_inh(ampl_inh, t_start, t_end)
'''
dendritic input
create spike pattern by recording the spikes of a simulation of n_pg
poisson generators. The recorded spike times are then given to spike
generators.
'''
n_pg = 200 # number of poisson generators
p_rate = 10.0 # rate in Hz
pgs = nest.Create('poisson_generator', n=n_pg, params={'rate': p_rate})
prrt_nrns_pg = nest.Create('parrot_neuron', n_pg)
nest.Connect(pgs, prrt_nrns_pg, {'rule': 'one_to_one'})
sr = nest.Create('spike_recorder', n_pg)
nest.Connect(prrt_nrns_pg, sr, {'rule': 'one_to_one'})
nest.Simulate(pattern_duration)
t_srs = []
for i, ssr in enumerate(nest.GetStatus(sr)):
t_sr = ssr['events']['times']
t_srs.append(t_sr)
nest.ResetKernel()
nest.SetKernelStatus({'resolution': resolution})
'''
neuron and devices
'''
nest.SetDefaults(nrn_model, nrn_params)
nrn = nest.Create(nrn_model)
# poisson generators are connected to parrot neurons which are
# connected to the mc neuron
prrt_nrns = nest.Create('parrot_neuron', n_pg)
# excitatory input to the soma
spike_times_soma_inp = np.arange(resolution, simulation_time, resolution)
sg_soma_exc = nest.Create('spike_generator',
params={'spike_times': spike_times_soma_inp,
'spike_weights': soma_exc_inp(spike_times_soma_inp)})
# inhibitory input to the soma
sg_soma_inh = nest.Create('spike_generator',
params={'spike_times': spike_times_soma_inp,
'spike_weights': soma_inh_inp(spike_times_soma_inp)})
# excitatory input to the dendrite
sg_prox = nest.Create('spike_generator', n=n_pg)
# for recording all parameters of the Urbanczik neuron
rqs = nest.GetDefaults(nrn_model)['recordables']
mm = nest.Create('multimeter', params={'record_from': rqs, 'interval': 0.1})
# for recoding the synaptic weights of the Urbanczik synapses
wr = nest.Create('weight_recorder')
# for recording the spiking of the soma
sr_soma = nest.Create('spike_recorder')
'''
create connections
'''
nest.Connect(sg_prox, prrt_nrns, {'rule': 'one_to_one'})
nest.CopyModel('urbanczik_synapse', 'urbanczik_synapse_wr',
{'weight_recorder': wr[0]})
nest.Connect(prrt_nrns, nrn, syn_spec=syn_params)
nest.Connect(mm, nrn, syn_spec={'delay': 0.1})
nest.Connect(sg_soma_exc, nrn,
syn_spec={'receptor_type': syns['soma_exc'], 'weight': 10.0*resolution, 'delay': resolution})
nest.Connect(sg_soma_inh, nrn,
syn_spec={'receptor_type': syns['soma_inh'], 'weight': 10.0*resolution, 'delay': resolution})
nest.Connect(nrn, sr_soma)
'''
simulation divided into intervals of the pattern duration
'''
for i in np.arange(n_rep_total):
# Set the spike times of the pattern for each spike generator
for (sg, t_sp) in zip(sg_prox, t_srs):
nest.SetStatus(
sg, {'spike_times': np.array(t_sp) + i*pattern_duration})
nest.Simulate(pattern_duration)
'''
read out devices
'''
# multimeter
rec = nest.GetStatus(mm)[0]['events']
t = rec['times']
V_s = rec['V_m.s']
V_d = rec['V_m.p']
V_d_star = V_w_star(V_d, nrn_params)
g_in = rec['g_in.s']
g_ex = rec['g_ex.s']
I_ex = rec['I_ex.p']
I_in = rec['I_in.p']
U_M = matching_potential(g_ex, g_in, nrn_params)
# weight recorder
data = nest.GetStatus(wr)
senders = data[0]['events']['senders']
targets = data[0]['events']['targets']
weights = data[0]['events']['weights']
times = data[0]['events']['times']
# spike recorder
data = nest.GetStatus(sr_soma)[0]['events']
spike_times_soma = data['times']
'''
plot results
'''
fs = 22
lw = 2.5
fig1, (axA, axB, axC, axD) = plt.subplots(4, 1, sharex=True)
# membrane potentials and matching potential
axA.plot(t, V_s, lw=lw, label=r'$U$ (soma)', color='darkblue')
axA.plot(t, V_d, lw=lw, label=r'$V_W$ (dendrit)', color='deepskyblue')
axA.plot(t, V_d_star, lw=lw, label=r'$V_W^\ast$ (dendrit)', color='b', ls='--')
axA.plot(t, U_M, lw=lw, label=r'$U_M$ (soma)', color='r', ls='-')
axA.set_ylabel('membrane pot [mV]', fontsize=fs)
axA.legend(fontsize=fs)
# somatic conductances
axB.plot(t, g_in, lw=lw, label=r'$g_I$', color='r')
axB.plot(t, g_ex, lw=lw, label=r'$g_E$', color='coral')
axB.set_ylabel('somatic cond', fontsize=fs)
axB.legend(fontsize=fs)
# dendritic currents
axC.plot(t, I_ex, lw=lw, label=r'$I_ex$', color='r')
axC.plot(t, I_in, lw=lw, label=r'$I_in$', color='coral')
axC.set_ylabel('dend current', fontsize=fs)
axC.legend(fontsize=fs)
# rates
axD.plot(t, phi(V_s, nrn_params), lw=lw, label=r'$\phi(U)$', color='darkblue')
axD.plot(t, phi(V_d, nrn_params), lw=lw,
label=r'$\phi(V_W)$', color='deepskyblue')
axD.plot(t, phi(V_d_star, nrn_params), lw=lw,
label=r'$\phi(V_W^\ast)$', color='b', ls='--')
axD.plot(t, h(V_d_star, nrn_params), lw=lw,
label=r'$h(V_W^\ast)$', color='g', ls='--')
axD.plot(t, phi(V_s, nrn_params) - phi(V_d_star, nrn_params), lw=lw,
label=r'$\phi(U) - \phi(V_W^\ast)$', color='r', ls='-')
axD.plot(spike_times_soma, 0.0*np.ones(len(spike_times_soma)),
's', color='k', markersize=2)
axD.legend(fontsize=fs)
# synaptic weights
fig2, axA = plt.subplots(1, 1)
for i in np.arange(2, 200, 10):
index = np.intersect1d(np.where(senders == i), np.where(targets == 1))
if not len(index) == 0:
axA.step(times[index], weights[index], label='pg_{}'.format(i - 2),
lw=lw)
axA.set_title('Synaptic weights of Urbanczik synapses')
axA.set_xlabel('time [ms]', fontsize=fs)
axA.set_ylabel('weight', fontsize=fs)
axA.legend(fontsize=fs - 4)
plt.show()
| gpl-2.0 |
kpespinosa/BuildingMachineLearningSystemsWithPython | ch09/02_ceps_based_classifier.py | 24 | 3574 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_roc, plot_confusion_matrix, GENRE_LIST
from ceps import read_ceps
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s" % genre_list[label])
scores_to_sort = roc_scores[label]
            median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
desc = "%s %s" % (name, genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
all_pr_scores = np.asarray(pr_scores.values()).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_ceps(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg CEPS", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "ceps",
"Confusion matrix of a CEPS based classifier")
| mit |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/Supp_zOR_SDI_ILINet.py | 1 | 4369 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/4/14
###Function: plot SDI zOR vs. ILINet zOR (supp figure)
###Import data:
###Command Line: python Supp_zOR_SDI_ILINet.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
ps = fxn.gp_plotting_seasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
### data files ###
# SDI classifications file
SDIclassif_in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications.csv', 'r')
SDIclassif_in.readline() # remove header
SDIclassif = csv.reader(SDIclassif_in, delimiter=',')
# ILINet classifications file
ILINetclassif_in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications.csv', 'r')
ILINetclassif_in.readline() # remove header
ILINetclassif = csv.reader(ILINetclassif_in, delimiter=',')
### program ###
# d_dataset_classif[season] = (mn_retro_zOR, mn_early_zOR)
## import SDI zOR classifications ##
d_SDI_classif = fxn.readNationalClassifFile(SDIclassif)
## import ILINet zOR classifications ##
d_ILINet_classif = fxn.readNationalClassifFile(ILINetclassif)
# plot values
SDI_retro = [d_SDI_classif[s][0] for s in ps]
ILINet_retro = [d_ILINet_classif[s][0] for s in ps]
SDI_early = [d_SDI_classif[s][1] for s in ps]
ILINet_early = [d_ILINet_classif[s][1] for s in ps]
print 'retro - SDI/ILINet', np.corrcoef(SDI_retro, ILINet_retro)
print 'early - SDI/ILINet', np.corrcoef(SDI_early, ILINet_early)
# draw plots
# SDI vs ILINet retrospective zOR
fig1 = plt.figure()
ax1 = plt.subplot(111)
ax1.plot(ILINet_retro, SDI_retro, marker = 'o', color = 'black', linestyle = 'None')
for s, x, y in zip(sl, ILINet_retro, SDI_retro):
ax1.annotate(s, xy=(x,y), xytext=(-25,-15), textcoords='offset points', fontsize=fssml)
ax1.vlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax1.hlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax1.fill([20, 1, 1, 20], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax1.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax1.fill([-10, -1, -1, -10], [-1, -1, -10, -10], facecolor='red', alpha=0.4)
ax1.annotate('Mild', xy=(4,19), fontsize=fssml)
ax1.annotate('Severe', xy=(-6,-8.5), fontsize=fssml)
ax1.set_title(fxn.gp_sigma_r, fontsize=fs)
ax1.set_ylabel('SDI', fontsize=fs)
ax1.set_xlabel('ILINet', fontsize=fs)
ax1.tick_params(axis='both', labelsize=fssml)
ax1.set_xlim([-10,20])
ax1.set_ylim([-10,20])
ax1.invert_yaxis()
ax1.invert_xaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_SDI_ILINet/zOR_SDI_ILINet_retro.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# SDI vs ILINet early warning zOR
fig2 = plt.figure()
ax2 = plt.subplot(111)
ax2.plot(ILINet_early, SDI_early, marker = 'o', color = 'black', linestyle = 'None')
for s, x, y in zip(sl, ILINet_early, SDI_early):
ax2.annotate(s, xy=(x,y), xytext=(-25,5), textcoords='offset points', fontsize=fssml)
ax2.vlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax2.hlines([-1, 1], -10, 20, colors='k', linestyles='solid')
# ax1.fill([20, 1, 1, 20], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
# ax1.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
# ax1.fill([-10, -1, -1, -10], [-1, -1, -10, -10], facecolor='red', alpha=0.4)
ax2.fill([8, 1, 1, 8], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax2.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax2.fill([-4, -1, -1, -4], [-1, -1, -10, -10], facecolor='red', alpha=0.4)
ax2.annotate('Mild', xy=(2.5, 7.5), fontsize=fssml)
ax2.annotate('Severe', xy=(-2.3,-3.3), fontsize=fssml)
ax2.set_title(fxn.gp_sigma_w, fontsize=fs)
ax2.set_ylabel('SDI', fontsize=fs)
ax2.set_xlabel('ILINet', fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-4,8])
ax2.set_ylim([-4,8])
ax2.invert_yaxis()
ax2.invert_xaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_SDI_ILINet/zOR_SDI_ILINet_early.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
| mit |
bloyl/mne-python | tutorials/inverse/85_brainstorm_phantom_ctf.py | 5 | 4676 | # -*- coding: utf-8 -*-
"""
.. _plot_brainstorm_phantom_ctf:
=======================================
Brainstorm CTF phantom dataset tutorial
=======================================
Here we compute the evoked from raw for the Brainstorm CTF phantom
tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and:
https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf
References
----------
.. footbibliography::
"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import fit_dipole
from mne.datasets.brainstorm import bst_phantom_ctf
from mne.io import read_raw_ctf
print(__doc__)
###############################################################################
# The data were collected with a CTF system at 2400 Hz.
data_path = bst_phantom_ctf.data_path(verbose=True)
# Switch to these to use the higher-SNR data:
# raw_path = op.join(data_path, 'phantom_200uA_20150709_01.ds')
# dip_freq = 7.
raw_path = op.join(data_path, 'phantom_20uA_20150603_03.ds')
dip_freq = 23.
erm_path = op.join(data_path, 'emptyroom_20150709_01.ds')
raw = read_raw_ctf(raw_path, preload=True)
###############################################################################
# The sinusoidal signal is generated on channel HDAC006, so we can use
# that to obtain precise timing.
sinusoid, times = raw[raw.ch_names.index('HDAC006-4408')]
plt.figure()
plt.plot(times[times < 1.], sinusoid.T[times < 1.])
###############################################################################
# Let's create some events using this signal by thresholding the sinusoid.
events = np.where(np.diff(sinusoid > 0.5) > 0)[1] + raw.first_samp
events = np.vstack((events, np.zeros_like(events), np.ones_like(events))).T
###############################################################################
# The CTF software compensation works reasonably well:
raw.plot()
###############################################################################
# But here we can get slightly better noise suppression, lower localization
# bias, and a better dipole goodness of fit with spatio-temporal (tSSS)
# Maxwell filtering:
raw.apply_gradient_compensation(0) # must un-do software compensation first
mf_kwargs = dict(origin=(0., 0., 0.), st_duration=10.)
raw = mne.preprocessing.maxwell_filter(raw, **mf_kwargs)
raw.plot()
###############################################################################
# Our choice of tmin and tmax should capture exactly one cycle, so
# we can make the unusual choice of baselining using the entire epoch
# when creating our evoked data. We also then crop to a single time point
# (@t=0) because this is a peak in our signal.
tmin = -0.5 / dip_freq
tmax = -tmin
epochs = mne.Epochs(raw, events, event_id=1, tmin=tmin, tmax=tmax,
baseline=(None, None))
evoked = epochs.average()
evoked.plot(time_unit='s')
evoked.crop(0., 0.)
###############################################################################
# .. _plt_brainstorm_phantom_ctf_eeg_sphere_geometry:
#
# Let's use a :ref:`sphere head geometry model <eeg_sphere_model>`
# and let's see the coordinate alignment and the sphere location.
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)
mne.viz.plot_alignment(raw.info, subject='sample',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain'])
del raw, epochs
###############################################################################
# To do a dipole fit, let's use the covariance provided by the empty room
# recording.
raw_erm = read_raw_ctf(erm_path).apply_gradient_compensation(0)
raw_erm = mne.preprocessing.maxwell_filter(raw_erm, coord_frame='meg',
**mf_kwargs)
cov = mne.compute_raw_covariance(raw_erm)
del raw_erm
with warnings.catch_warnings(record=True):
# ignore warning about data rank exceeding that of info (75 > 71)
warnings.simplefilter('ignore')
dip, residual = fit_dipole(evoked, cov, sphere, verbose=True)
###############################################################################
# Compare the actual position with the estimated one.
expected_pos = np.array([18., 0., 49.])
diff = np.sqrt(np.sum((dip.pos[0] * 1000 - expected_pos) ** 2))
print('Actual pos: %s mm' % np.array_str(expected_pos, precision=1))
print('Estimated pos: %s mm' % np.array_str(dip.pos[0] * 1000, precision=1))
print('Difference: %0.1f mm' % diff)
print('Amplitude: %0.1f nAm' % (1e9 * dip.amplitude[0]))
print('GOF: %0.1f %%' % dip.gof[0])
| bsd-3-clause |
r-mart/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
sbraden/circle-craters | circle_craters.py | 1 | 23925 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
CircleCraters
A QGIS plugin
A crater counting tool for planetary science
-------------------
begin : 2015-01-28
git sha : $Format:%H$
copyright : (C) 2015 by Sarah E Braden
email : [email protected]
***************************************************************************/
"""
from __future__ import absolute_import
#from PyQt5 import QtCore
from builtins import str
from builtins import range
from builtins import object
import pdb
import os.path
import datetime
from matplotlib.path import Path
from qgis.PyQt.QtCore import QCoreApplication, QSettings, QTranslator, QVariant, qVersion
#from qgis.PyQt.QtGui import (
# QAction,
# QIcon,
#)
from qgis.PyQt.QtWidgets import QApplication, QAction, QMessageBox
from PyQt5.QtGui import QIcon
#from qgis.PyQt import QIcon
from qgis.core import (
QgsDistanceArea,
QgsFeature,
QgsField,
QgsGeometry,
QgsMapLayer,
QgsCoordinateTransform,
QgsCoordinateTransformContext,
QgsCoordinateReferenceSystem,
QgsPoint,
QgsPointXY,
QgsProject,
QgsWkbTypes
)
# QgsMapLayerRegistry has been moved to QgsProject.
from qgis.core import QgsSettings, QgsMessageLog, QgsMapLayer, QgsProject, QgsWkbTypes
from qgis.gui import (
QgsMapToolEmitPoint,
QgsMessageBar,
)
from osgeo import osr
# Initialize Qt resources from file resources.py
#from . import resources_rc
import CircleCraters.resources_rc
from CircleCraters.errors import CircleCraterError
from CircleCraters.shapes import Point, Circle
from CircleCraters.export_dialog import ExportDialog
from CircleCraters.choose_layers_dialog import ChooseLayersDialog
# TODO: put units on attribute table headings
# TODO: put polygon area in attribute table for that layer
class CircleCraters(object):
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
self.canvas = self.iface.mapCanvas()
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'CircleCraters_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.export_dlg = ExportDialog()
self.export_dlg.selected.connect(self.export)
self.choose_dlg = ChooseLayersDialog()
self.choose_dlg.selected.connect(self.on_layer_select)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Circle Craters')
self.toolbar = self.iface.addToolBar(u'CircleCraters')
self.toolbar.setObjectName(u'CircleCraters')
self.tool = QgsMapToolEmitPoint(self.canvas)
self.tool.canvasClicked.connect(self.handle_click)
self.tool.deactivated.connect(self.reset_clicks)
self.clicks = []
self.layer = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('CircleCraters', message)
def show_error(self, message, title='Error', **kwargs):
# QgsMessageBar.CRITICAL
self.iface.messageBar().pushMessage(
title, message, level=4, **kwargs)
def show_info(self, message, **kwargs):
# QgsMessageBar.INFO -> Qgis::Info
self.iface.messageBar().pushMessage(
message, level=3, **kwargs)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None
):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self): # noqa
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
self.start_action = self.add_action(
':/plugins/CircleCraters/icons/ic_layers_48px.svg',
text=self.tr(u'Select Crater Counting Layer'),
callback=self.show_layer_select,
parent=self.iface.mainWindow(),
)
self.stop_action = self.add_action(
':/plugins/CircleCraters/icons/ic_layers_clear_48px.svg',
text=self.tr(u'Stop Crater Counting'),
enabled_flag=False,
callback=self.stop_tool,
parent=self.iface.mainWindow(),
)
self.circle_action = self.add_action(
':/plugins/CircleCraters/icons/ic_add_circle_outline_48px.svg',
text=self.tr(u'Circle Craters'),
enabled_flag=False,
callback=self.set_tool,
parent=self.iface.mainWindow(),
)
self.export_action = self.add_action(
':/plugins/CircleCraters/icons/ic_archive_48px.svg',
text=self.tr(u'Export Data'),
callback=self.show_export_dialog,
parent=self.iface.mainWindow(),
)
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Circle Craters'),
action)
self.iface.removeToolBarIcon(action)
def reset_clicks(self):
self.clicks = []
def handle_click(self, point, button):
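        # A crater is digitized from three clicks on its rim: collect each
        # clicked point and, once three are available, fit and draw the
        # circle passing through them.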
self.clicks.append(Point(point.x(), point.y()))
if len(self.clicks) != 3:
return
self.draw_circle(Circle(*self.clicks))
self.reset_clicks()
    def set_tool(self):
        """Run method that activates the crater counting map tool"""
        if not self.layer:
            error = 'No crater counting layer selected. Please choose a layer.'
            self.show_error(error)
            return
        self.canvas.setMapTool(self.tool)
def stop_tool(self):
"""Run method that deactivates the crater counting tool"""
self.canvas.unsetMapTool(self.tool)
self.stop_action.setEnabled(False)
self.circle_action.setEnabled(False)
self.layer = None
def is_valid_layer(self, layer):
if layer.type() != QgsMapLayer.VectorLayer:
return False
return layer.geometryType() == QgsWkbTypes.PolygonGeometry
def get_layer_choices(self):
root = QgsProject.instance().layerTreeRoot()
layers = root.findLayers()
return [layer.layer() for layer in layers if self.is_valid_layer(layer.layer())]
def show_layer_select(self):
""" Run method that lets users choose layer for crater shapefile.
Sets self.layer
"""
try:
self.choose_dlg.show(self.get_layer_choices())
except CircleCraterError as error:
self.show_error(error.message)
def on_layer_select(self, layer):
self.layer = layer
self.set_field_attributes()
msg = 'The layer "{!s}" is set as the crater counting layer'
self.show_info(msg.format(layer.name()))
self.stop_action.setEnabled(True)
self.circle_action.setEnabled(True)
self.set_tool()
def set_field_attributes(self):
self.layer.startEditing()
# fieldNameIndex has been renamed to lookupField.
if self.layer.fields().lookupField('diameter') == -1:
field_attribute = QgsField('diameter', QVariant.Double)
self.layer.dataProvider().addAttributes([field_attribute])
if self.layer.fields().lookupField('center_lon') == -1:
field_attribute = QgsField('center_lon', QVariant.Double)
self.layer.dataProvider().addAttributes([field_attribute])
if self.layer.fields().lookupField('center_lat') == -1:
field_attribute = QgsField('center_lat', QVariant.Double)
self.layer.dataProvider().addAttributes([field_attribute])
self.layer.updateFields()
self.layer.commitChanges()
def show_export_dialog(self):
""" Run method that exports data to a file"""
try:
self.export_dlg.show(self.get_layer_choices())
except CircleCraterError as error:
self.show_error(error.message)
def export(self, crater_layer, area_layer, filename):
try:
self.write_diam_file(crater_layer, area_layer, filename)
except CircleCraterError as error:
self.show_error(error.message)
def create_diam_header(self, total_area, crater_layer):
current_datetime = str(datetime.datetime.now())
# a,b = self.get_a_and_b(self.layer)
da = self.get_distance_area(self.layer)
if da.willUseEllipsoid():
header = [
'# Diam file for Craterstats',
'# Date of measurement export = {}'.format(current_datetime),
'',
'Ellipsoid {}'.format(da.ellipsoid()),
'',
'layer CRS: {}'.format(crater_layer.crs().description()),
'',
'Total Crater Area <km^2> = {}'.format(total_area),
'',
'# diameter(m), lon, lat',
'',
]
else:
header = [
'# Diam file for Craterstats',
'# Date of measurement export = {}'.format(current_datetime),
'',
                '# Ellipsoid is not available, Area and Diameter units may not be right.',
'',
'Total Crater Area = {}'.format(total_area),
'',
'# diameter, lon, lat',
'',
]
return '\n'.join(header)
def write_diam_file(self, crater_layer, area_layer, filename):
"""Method writes crater data to a special formatted file."""
total_area = self.compute_area(area_layer)
km_squared = self.convert_square_meters_to_km(total_area)
header = self.create_diam_header(km_squared, crater_layer)
nested_list = self.format_diam_data(crater_layer, area_layer)
# tab delimited datafile
with open(filename, 'w') as fp:
fp.write(header)
fp.writelines('\t'.join(i) + '\n' for i in nested_list)
def get_distance_area(self, layer):
destination = layer.crs()
# Using the general purpose distance and area calculator,
# capable of performing ellipsoid based calculations.
distance_area = QgsDistanceArea()
c = QgsCoordinateTransformContext()
distance_area.setSourceCrs(layer.crs(), c )
ellips = destination.ellipsoidAcronym()
if ellips == '' :
ellips = QgsProject.instance().ellipsoid()
distance_area.setEllipsoid(ellips)
# sets whether coordinates must be projected to ellipsoid before measuring
# distance_area.setEllipsoidalMode(True)
return distance_area
def convert_meters_to_km(self, meters):
return meters * 0.001
def convert_square_meters_to_km(self, square_meters):
return square_meters * 1.0e-6
def measure(self, layer, geometry):
return self.get_distance_area(layer).measureLength(geometry)
def get_actual_area(self, feature, distance_area, xform):
# TODO: distance_area and xform should probably be class variables
QgsMessageLog.logMessage("message", "name")
print("======>",feature.geometry())
if feature.geometry().isMultipart(): # new part for multipolylines
points = feature.geometry().asMultiPolygon()
print("multipart:",len(points))
print("First point: ",points[0][0])
for p in points[0][0]:
print(p)
points = points[0][0]
else:
points = feature.geometry().asPolygon()
points = points[0]
transformed = [self.transform_point(xform, point) for point in points]
new_polygon = QgsGeometry.fromPolygonXY([transformed])
actual_area = distance_area.measureArea(new_polygon)
return actual_area
def compute_area(self, layer):
"""Returns values are in meters resp. square meters
http://qgis.org/api/2.8/classQgsDistanceArea.html
use measure() takes QgsGeometry as a parameter and calculates distance
or area
"""
destination = layer.crs()
source = layer.crs()
xform = self.crs_transform(source, destination)
distance_area = self.get_distance_area(layer)
features = list(layer.getFeatures())
return sum([self.get_actual_area(f, distance_area, xform) for f in features])
def get_fields(self, feature, diameter, lon, lat):
"""Retrieves fields from the attribute table in the order required
for .diam file: diameter, lon, lat
And casts as strings"""
# diameter is in units of km
attributes = feature.attributes()
# fraction is always 1
fraction = 1
# refer to an attribute by its index
field_list = [
str(self.convert_meters_to_km(attributes[diameter])),
#str(1),
# fraction was in old craterstats
# str(fraction),
str(attributes[lon]),
str(attributes[lat])
#str(1)
]
return field_list
def crater_center(self, crater, lat, lon):
print(crater)
print("ATT:",crater.attributes(),lat,lon,crater.attributes()[lon],crater.attributes()[lat] )
center_point = QgsPointXY(
float(crater.attributes()[lon]),
float(crater.attributes()[lat]),
)
return QgsGeometry.fromPointXY(center_point)
def experiment(self, feature_geom, point_geom):
"""
        feature_geom and point_geom are QgsGeometry objects.
Is a QgsPoint within an arbitrary QgsPolygon?
"""
polygon = feature_geom.asPolygon()
point = point_geom.asPoint()
codes = []
codes.append(Path.MOVETO)
for i in range(0, len(polygon[0]) - 2):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
path = Path(polygon[0], codes)
if path.contains_point(point):
return True
else:
return False
def intersects(self, crater, area_geometries, lat, lon):
# This geometry is in units of degrees
center_geometry = self.crater_center(crater, lat, lon)
# temp = any(center_geometry.within(a) for a in area_geometries)
return any(self.experiment(a, center_geometry) for a in area_geometries)
def format_diam_data(self, crater_layer, area_layer):
"""Formats crater diameter data for export as .diam file
Checks to see if craters intersect with area polygons in area layer
"""
diameter = crater_layer.fields().indexFromName('diameter')
lon = crater_layer.fields().indexFromName('center_lon')
lat = crater_layer.fields().indexFromName('center_lat')
craters = list(crater_layer.getFeatures())
areas = list(area_layer.getFeatures())
# TODO: distance_area and xform should probably be class variables
destination = crater_layer.crs()
source = area_layer.crs()
xform = self.crs_transform(source, destination)
distance_area = self.get_distance_area(area_layer)
# Get area geometry in units of degrees
new_geometries = [self.get_transformed_polygon(a, distance_area, xform) for a in areas]
        # Keep only craters whose center point lies inside one of the area polygons
craters = [c for c in craters if self.intersects(c, new_geometries, lat, lon)]
print("CRATERS: ",craters)
# Craterstats 2.0 line is:
        # crater = {diam, fraction, lon, lat, topo_scale_factor}
# 12.0185588932 1 159.43028979 16.9521753319 1
return [self.get_fields(c, diameter, lon, lat) for c in craters]
def get_transformed_polygon(self, feature, distance_area, xform):
"""Returns transformd polygon geometry"""
# TODO: distance_area and xform should probably be class variables
if feature.geometry().isMultipart(): # new part for multipolylines
points = feature.geometry().asMultiPolygon()
print("multipart:",len(points))
print("First point: ",points[0][0])
for p in points[0][0]:
print(p)
points = points[0][0]
else:
points = feature.geometry().asPolygon()
points = points[0]
transformed = [self.transform_point(xform, point) for point in points]
print("TRANSFORMED->",transformed)
return QgsGeometry.fromPolygonXY( [transformed] )
def crs_transform(self, source, destination):
print(source, destination, QgsProject.instance() )
return QgsCoordinateTransform(source, destination, QgsProject.instance())
def transform_point(self, xform, point):
return xform.transform(point)
def get_destination_crs(self):
# moon = '+proj=longlat +a=1737400 +b=1737400 +no_defs'
# destination = QgsCoordinateReferenceSystem()
# destination.createFromProj4(moon)
destination = self.layer.crs()
return destination
def get_latlong_srs(self):
p = QgsProject.instance()
e = p.ellipsoid()
crs = p.crs()
srs = osr.SpatialReference()
srs.ImportFromProj4(crs.toProj4())
a = srs.GetSemiMajor()
b = srs.GetSemiMinor()
proj4 = "+proj=latlong +a={} +b={}".format(a,b)
srs_ll = osr.SpatialReference()
srs_ll.ImportFromProj4(proj4)
return srs_ll
def get_srs(self):
p = QgsProject.instance()
e = p.ellipsoid()
crs = p.crs()
srs = osr.SpatialReference()
srs.ImportFromProj4(crs.toProj4())
return srs
def get_a_and_b(self,layer):
#this_crs = layer.crs()
#wkt = this_crs.toWkt()
#srs = osr.SpatialReference()
#srs.importFromWkt(wkt)
#print(srs)
#print(dir(this_crs))
p = QgsProject.instance()
e = p.ellipsoid()
crs = p.crs()
srs = osr.SpatialReference()
srs.ImportFromProj4(crs.toProj4())
print("******",e)
da = QgsDistanceArea()
da.willUseEllipsoid() # should be true
a = srs.GetSemiMajor()
b = srs.GetSemiMinor()
return a,b
def draw_circle(self, circle):
polygon = [QgsPointXY(*point) for point in circle.to_polygon()]
print(circle)
print(polygon)
print(type(polygon))
#gPnt = QgsGeometry.fromPointXY(QgsPointXY(1,1))
#gLine = QgsGeometry.fromPolyline([QgsPoint(1, 1), QgsPoint(2, 2)])
#gPolygon = QgsGeometry.fromPolygonXY([[QgsPointXY(1, 1), QgsPointXY(2, 2), QgsPointXY(2, 1)]])
#geometry = QgsGeometry.fromPolygon([polygon])
geometry = QgsGeometry.fromPolygonXY([polygon])
feature = QgsFeature()
feature.setGeometry(geometry)
feature.setFields(self.layer.fields())
destination = self.layer.crs()
source = self.layer.crs()
xform = self.crs_transform(source, destination)
#print circle.center.x, circle.center.y
#print(circle.center.x, circle.center.y)
line = [
QgsPointXY(circle.center.x, circle.center.y),
QgsPointXY(circle.center.x + circle.radius, circle.center.y),
]
transformed = [
self.transform_point(xform, line[0]),
self.transform_point(xform, line[1]),
]
print("****",transformed)
#new_line_geometry = QgsGeometry.fromPolyline( [ QgsGeometry.fromPointXY(transformed[0]), QgsGeometry.fromPointXY(transformed[1]) ] )
new_line_geometry = QgsGeometry.fromPolyline([QgsPoint(transformed[0][0], transformed[0][1]), QgsPoint(transformed[1][0], transformed[1][1])])
distance_area = self.get_distance_area(self.layer)
actual_line_distance = distance_area.measureLength(new_line_geometry)
# Translate circle center to units of degrees
center_in_degrees = xform.transform(circle.center.x, circle.center.y)
# circle_feature.id() is NULL for .shp file
        # and assigned automatically for .gpkg
# order is id, diameter, lon, lat
feature.setAttribute('diameter',actual_line_distance * 2)
feature.setAttribute('center_lon',center_in_degrees[0])
feature.setAttribute('center_lat',center_in_degrees[1])
self.layer.startEditing()
self.layer.dataProvider().addFeatures([feature])
#self.layer.addFeature(feature, True)
self.layer.commitChanges()
# update layer's extent when new features have been added
# because change of extent in provider is not propagated to the layer
self.layer.updateExtents()
| bsd-3-clause |
brainiak/brainiak | brainiak/eventseg/event.py | 2 | 26617 | # Copyright 2020 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation using a Hidden Markov Model
Given an ROI timeseries, this class uses an annealed fitting procedure to
segment the timeseries into events with stable activity patterns. After
learning the signature activity pattern of each event, the model can then be
applied to other datasets to identify a corresponding sequence of events.
Full details are available in:
Christopher Baldassano, Janice Chen, Asieh Zadbood,
Jonathan W Pillow, Uri Hasson, Kenneth A Norman
Discovering event structure in continuous narrative perception and memory
Neuron, Volume 95, Issue 3, 709 - 721.e5
https://doi.org/10.1016/j.neuron.2017.06.041
This class also extends the model described in the Neuron paper:
1) It allows transition matrices that are composed of multiple separate
chains of events rather than a single linear path. This allows a model to
contain patterns for multiple event sequences (e.g. narratives), and
fit probabilities along each of these chains on a new, unlabeled timeseries.
To use this option, pass in an event_chain vector labeling which events
belong to each chain, define event patterns using set_event_patterns(),
then fit to a new dataset with find_events.
2) To obtain better fits when the underlying event structure contains
events that vary substantially in length, the split_merge option allows
the fit() function to re-distribute events during fitting. The number of
merge/split proposals is controlled by split_merge_proposals, which
controls how thorough versus fast the fitting process is.
"""
# Authors: Chris Baldassano and Cătălin Iordan (Princeton University)
import numpy as np
from scipy import stats
import logging
import copy
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.exceptions import NotFittedError
import itertools
from . import _utils as utils # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"EventSegment",
]
class EventSegment(BaseEstimator):
"""Class for event segmentation of continuous fMRI data
Parameters
----------
n_events: int
Number of segments to learn
step_var: Callable[[int], float] : default 4 * (0.98 ** (step - 1))
The Gaussian variance to use during fitting, as a function of the
number of steps. Should decrease slowly over time.
n_iter: int, default: 500
Maximum number of steps to run during fitting
event_chains: ndarray with length = n_events
Array with unique value for each separate chain of events, each linked
in the order they appear in the array
split_merge: bool, default: False
Determines whether merge/split proposals are used during fitting with
fit(). This can improve fitting performance when events are highly
uneven in size, but requires additional time
split_merge_proposals: int, default: 1
Number of merges and splits to consider at each step. Computation time
scales as O(proposals^2) so this should usually be a small value
Attributes
----------
p_start, p_end: length n_events+1 ndarray
initial and final prior distributions over events
P: n_events+1 by n_events+1 ndarray
HMM transition matrix
ll_ : ndarray with length = number of training datasets
Log-likelihood for training datasets over the course of training
segments_: list of (time by event) ndarrays
Learned (soft) segmentation for training datasets
event_var_ : float
Gaussian variance at the end of learning
event_pat_ : voxel by event ndarray
Learned mean patterns for each event
"""
def _default_var_schedule(step):
return 4 * (0.98 ** (step - 1))
def __init__(self, n_events=2,
step_var=_default_var_schedule,
n_iter=500, event_chains=None,
split_merge=False, split_merge_proposals=1):
self.n_events = n_events
self.step_var = step_var
self.n_iter = n_iter
self.split_merge = split_merge
self.split_merge_proposals = split_merge_proposals
if event_chains is None:
self.event_chains = np.zeros(n_events)
else:
self.event_chains = event_chains
def _fit_validate(self, X):
"""Validate input to fit()
Validate data passed to fit(). Includes a transpose operation to
change the row/column order of X and z-scoring in time.
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented
Returns
-------
X: list of voxel by time ndarrays
"""
if len(np.unique(self.event_chains)) > 1:
raise RuntimeError("Cannot fit chains, use set_event_patterns")
# Copy X into a list and transpose
X = copy.deepcopy(X)
if type(X) is not list:
X = [X]
for i in range(len(X)):
X[i] = check_array(X[i])
X[i] = X[i].T
# Check that number of voxels is consistent across datasets
n_dim = X[0].shape[0]
for i in range(len(X)):
assert (X[i].shape[0] == n_dim)
        # Ensure data is z-scored in time
for i in range(len(X)):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
return X
def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = self._fit_validate(X)
n_train = len(X)
n_dim = X[0].shape[0]
self.classes_ = np.arange(self.n_events)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1], self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i], mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
if step > 1 and self.split_merge:
curr_ll = np.mean(self.ll_[-1, :])
self.ll_[-1, :], log_gamma, mean_pat = \
self._split_merge(X, log_gamma, iteration_var, curr_ll)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob
def _forward_backward(self, logprob):
"""Runs forward-backward algorithm on observation log probs
Given the log probability of each timepoint being generated by
each event, run the HMM forward-backward algorithm to find the
probability that each timepoint belongs to each event (based on the
transition priors in p_start, p_end, and P)
See https://en.wikipedia.org/wiki/Forward-backward_algorithm for
mathematical details
Parameters
----------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
Returns
-------
log_gamma : time by event ndarray
Log probability of each timepoint belonging to each event
ll : float
Log-likelihood of fit
"""
logprob = copy.copy(logprob)
t = logprob.shape[0]
logprob = np.hstack((logprob, float("-inf") * np.ones((t, 1))))
# Initialize variables
log_scale = np.zeros(t)
log_alpha = np.zeros((t, self.n_events + 1))
log_beta = np.zeros((t, self.n_events + 1))
# Set up transition matrix, with final sink state
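        # For a single chain of 3 events plus the sink state, with
        # p = (n_events_in_chain - 1) / T, the matrix built here is:
        #
        #     [[1-p,   p,   0,   0],
        #      [  0, 1-p,   p,   0],
        #      [  0,   0, 1-p,   p],
        #      [  0,   0,   0,   1]]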
self.p_start = np.zeros(self.n_events + 1)
self.p_end = np.zeros(self.n_events + 1)
self.P = np.zeros((self.n_events + 1, self.n_events + 1))
label_ind = np.unique(self.event_chains, return_inverse=True)[1]
n_chains = np.max(label_ind) + 1
# For each chain of events, link them together and then to sink state
for c in range(n_chains):
chain_ind = np.nonzero(label_ind == c)[0]
self.p_start[chain_ind[0]] = 1 / n_chains
self.p_end[chain_ind[-1]] = 1 / n_chains
p_trans = (len(chain_ind) - 1) / t
if p_trans >= 1:
raise ValueError('Too few timepoints')
for i in range(len(chain_ind)):
self.P[chain_ind[i], chain_ind[i]] = 1 - p_trans
if i < len(chain_ind) - 1:
self.P[chain_ind[i], chain_ind[i+1]] = p_trans
else:
self.P[chain_ind[i], -1] = p_trans
self.P[-1, -1] = 1
# Forward pass
for i in range(t):
if i == 0:
log_alpha[0, :] = self._log(self.p_start) + logprob[0, :]
else:
log_alpha[i, :] = self._log(np.exp(log_alpha[i - 1, :])
.dot(self.P)) + logprob[i, :]
log_scale[i] = np.logaddexp.reduce(log_alpha[i, :])
log_alpha[i] -= log_scale[i]
# Backward pass
log_beta[-1, :] = self._log(self.p_end) - log_scale[-1]
for i in reversed(range(t - 1)):
obs_weighted = log_beta[i + 1, :] + logprob[i + 1, :]
offset = np.max(obs_weighted)
log_beta[i, :] = offset + self._log(
np.exp(obs_weighted - offset).dot(self.P.T)) - log_scale[i]
# Combine and normalize
log_gamma = log_alpha + log_beta
log_gamma -= np.logaddexp.reduce(log_gamma, axis=1, keepdims=True)
ll = np.sum(log_scale[:(t - 1)]) + np.logaddexp.reduce(
log_alpha[-1, :] + log_scale[-1] + self._log(self.p_end))
log_gamma = log_gamma[:, :-1]
return log_gamma, ll
def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape)
def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy()
def find_events(self, testing_data, var=None, scramble=False):
"""Applies learned event segmentation to new testing dataset
After fitting an event segmentation using fit() or setting event
patterns directly using set_event_patterns(), this function finds the
same sequence of event patterns in a new testing dataset.
Parameters
----------
testing_data: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
var: float or 1D ndarray of length equal to the number of events
default: uses variance that maximized training log-likelihood
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance. If fit() has not previously
been run, this must be specifed (cannot be None).
scramble: bool : default False
If true, the order of the learned events are shuffled before
fitting, to give a null distribution
Returns
-------
segments : time by event ndarray
The resulting soft segmentation. segments[t,e] = probability
that timepoint t is in event e
test_ll : float
Log-likelihood of model fit
"""
if var is None:
if not hasattr(self, 'event_var_'):
raise NotFittedError(("Event variance must be provided, if "
"not previously set by fit()"))
else:
var = self.event_var_
if not hasattr(self, 'event_pat_'):
raise NotFittedError(("The event patterns must first be set "
"by fit() or set_event_patterns()"))
if scramble:
mean_pat = self.event_pat_[:, np.random.permutation(self.n_events)]
else:
mean_pat = self.event_pat_
logprob = self._logprob_obs(testing_data.T, mean_pat, var)
lg, test_ll = self._forward_backward(logprob)
segments = np.exp(lg)
return segments, test_ll
def predict(self, X):
"""Applies learned event segmentation to new testing dataset
Alternative function for segmenting a new dataset after using
fit() to learn a sequence of events, to comply with the sklearn
Classifier interface
Parameters
----------
X: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
Returns
-------
Event label for each timepoint
"""
check_is_fitted(self, ["event_pat_", "event_var_"])
X = check_array(X)
segments, test_ll = self.find_events(X)
return np.argmax(segments, axis=1)
def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var
def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
segments = np.exp(lg)
return segments, test_ll
def _split_merge(self, X, log_gamma, iteration_var, curr_ll):
"""Attempt to improve log-likelihood with a merge/split
The simulated annealing used in fit() is susceptible to getting
stuck in a local minimum if there are some very short events. This
function attempts to find
a) pairs of neighboring events that are highly similar, to merge
b) events that can be split into two dissimilar events
It then tests to see whether simultaneously merging one of the
pairs from (a) and splitting one of the events from (b) can improve
the log-likelihood. The number of (a)/(b) pairs tested is determined
by the split_merge_proposals class attribute.
Parameters
----------
X: list of voxel by time ndarrays
fMRI datasets being fit
log_gamma : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset
iteration_var : float
Current variance in simulated annealing
curr_ll: float
Log-likelihood of current model
Returns
-------
return_ll : ndarray with length equal to length of X
Log-likelihood after merge/split (same as curr_ll if no
merge/split improved curr_ll)
return_lg : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset (same as log_gamma if no merge/split
improved curr_ll)
return_mp : voxel by event ndarray
Mean patterns of events (after possible merge/split)
"""
# Compute current probabilities and mean patterns
n_train = len(X)
n_dim = X[0].shape[0]
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# For each event, merge its probability distribution
# with the next event, and also split its probability
# distribution at its median into two separate events.
# Use these new event probability distributions to compute
# merged and split event patterns.
merge_pat = np.empty((n_train, n_dim, self.n_events))
split_pat = np.empty((n_train, n_dim, 2 * self.n_events))
for i, sp in enumerate(seg_prob): # Iterate over datasets
m_evprob = np.zeros((sp.shape[0], sp.shape[1]))
s_evprob = np.zeros((sp.shape[0], 2 * sp.shape[1]))
cs = np.cumsum(sp, axis=0)
for e in range(sp.shape[1]):
# Split distribution at midpoint and normalize each half
mid = np.where(cs[:, e] >= 0.5)[0][0]
cs_first = cs[mid, e] - sp[mid, e]
cs_second = 1 - cs_first
s_evprob[:mid, 2 * e] = sp[:mid, e] / cs_first
s_evprob[mid:, 2 * e + 1] = sp[mid:, e] / cs_second
# Merge distribution with next event distribution
m_evprob[:, e] = sp[:, e:(e + 2)].mean(1)
# Weight data by distribution to get event patterns
merge_pat[i, :, :] = X[i].dot(m_evprob)
split_pat[i, :, :] = X[i].dot(s_evprob)
# Average across datasets
merge_pat = np.mean(merge_pat, axis=0)
split_pat = np.mean(split_pat, axis=0)
# Correlate the current event patterns with the split and
# merged patterns
merge_corr = np.zeros(self.n_events)
split_corr = np.zeros(self.n_events)
for e in range(self.n_events):
split_corr[e] = np.corrcoef(mean_pat[:, e],
split_pat[:, (2 * e):(2 * e + 2)],
rowvar=False)[0, 1:3].max()
merge_corr[e] = np.corrcoef(merge_pat[:, e],
mean_pat[:, e:(e + 2)],
rowvar=False)[0, 1:3].min()
merge_corr = merge_corr[:-1]
# Find best merge/split candidates
# A high value of merge_corr indicates that a pair of events are
# very similar to their merged pattern, and are good candidates for
# being merged.
# A low value of split_corr indicates that an event's pattern is
# very dissimilar from the patterns in its first and second half,
# and is a good candidate for being split.
best_merge = np.flipud(np.argsort(merge_corr))
best_merge = best_merge[:self.split_merge_proposals]
best_split = np.argsort(split_corr)
best_split = best_split[:self.split_merge_proposals]
# For every pair of merge/split candidates, attempt the merge/split
# and measure the log-likelihood. If any are better than curr_ll,
# accept this best merge/split
mean_pat_last = mean_pat.copy()
return_ll = curr_ll
return_lg = copy.deepcopy(log_gamma)
return_mp = mean_pat.copy()
for m_e, s_e in itertools.product(best_merge, best_split):
if m_e == s_e or m_e+1 == s_e:
# Don't attempt to merge/split same event
continue
# Construct new set of patterns with merge/split
mean_pat_ms = np.delete(mean_pat_last, s_e, axis=1)
mean_pat_ms = np.insert(mean_pat_ms, [s_e, s_e],
split_pat[:, (2 * s_e):(2 * s_e + 2)],
axis=1)
mean_pat_ms = np.delete(mean_pat_ms,
[m_e + (s_e < m_e), m_e + (s_e < m_e) + 1],
axis=1)
mean_pat_ms = np.insert(mean_pat_ms, m_e + (s_e < m_e),
merge_pat[:, m_e], axis=1)
# Measure log-likelihood with these new patterns
ll_ms = np.zeros(n_train)
log_gamma_ms = list()
for i in range(n_train):
logprob = self._logprob_obs(X[i],
mean_pat_ms, iteration_var)
lg, ll_ms[i] = self._forward_backward(logprob)
log_gamma_ms.append(lg)
# If better than best ll so far, save to return to fit()
if ll_ms.mean() > return_ll:
return_mp = mean_pat_ms.copy()
return_ll = ll_ms
for i in range(n_train):
return_lg[i] = log_gamma_ms[i].copy()
logger.debug("Identified merge %d,%d and split %d",
m_e, m_e+1, s_e)
return return_ll, return_lg, return_mp
| apache-2.0 |
joakim-hove/ert | python/python/ert_gui/plottery/plot_data_gatherer.py | 1 | 5835 | from pandas import DataFrame
from res.enkf.export import GenKwCollector, SummaryCollector, GenDataCollector, SummaryObservationCollector, \
GenDataObservationCollector, CustomKWCollector
class PlotDataGatherer(object):
def __init__(self, dataGatherFunc, conditionFunc, refcaseGatherFunc=None, observationGatherFunc=None, historyGatherFunc=None):
super(PlotDataGatherer, self).__init__()
self._dataGatherFunction = dataGatherFunc
self._conditionFunction = conditionFunc
self._refcaseGatherFunction = refcaseGatherFunc
self._observationGatherFunction = observationGatherFunc
self._historyGatherFunc = historyGatherFunc
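    # Hedged construction sketch (the wiring below is an assumption based on
    # the static gather methods defined in this class; `summary_keys` is a
    # hypothetical set of known summary keys):
    #
    #     summary_gatherer = PlotDataGatherer(
    #         dataGatherFunc=PlotDataGatherer.gatherSummaryData,
    #         conditionFunc=lambda key: key in summary_keys,
    #         refcaseGatherFunc=PlotDataGatherer.gatherSummaryRefcaseData,
    #         observationGatherFunc=PlotDataGatherer.gatherSummaryObservationData,
    #         historyGatherFunc=PlotDataGatherer.gatherSummaryHistoryData,
    #     )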
def hasHistoryGatherFunction(self):
""" :rtype: bool """
return self._historyGatherFunc is not None
def hasRefcaseGatherFunction(self):
""" :rtype: bool """
return self._refcaseGatherFunction is not None
def hasObservationGatherFunction(self):
""" :rtype: bool """
return self._observationGatherFunction is not None
def canGatherDataForKey(self, key):
""" :rtype: bool """
return self._conditionFunction(key)
def gatherData(self, ert, case, key):
""" :rtype: pandas.DataFrame """
if not self.canGatherDataForKey(key):
raise UserWarning("Unable to gather data for key: %s" % key)
return self._dataGatherFunction(ert, case, key)
def gatherRefcaseData(self, ert, key):
""" :rtype: pandas.DataFrame """
if not self.canGatherDataForKey(key) or not self.hasRefcaseGatherFunction():
raise UserWarning("Unable to gather refcase data for key: %s" % key)
return self._refcaseGatherFunction(ert, key)
def gatherObservationData(self, ert, case, key):
""" :rtype: pandas.DataFrame """
if not self.canGatherDataForKey(key) or not self.hasObservationGatherFunction():
raise UserWarning("Unable to gather observation data for key: %s" % key)
return self._observationGatherFunction(ert, case, key)
def gatherHistoryData(self, ert, case, key):
""" :rtype: pandas.DataFrame """
if not self.canGatherDataForKey(key) or not self.hasHistoryGatherFunction():
raise UserWarning("Unable to gather history data for key: %s" % key)
return self._historyGatherFunc(ert, case, key)
@staticmethod
def gatherGenKwData(ert, case, key):
""" :rtype: pandas.DataFrame """
data = GenKwCollector.loadAllGenKwData(ert, case, [key])
return data[key].dropna()
@staticmethod
def gatherSummaryData(ert, case, key):
""" :rtype: pandas.DataFrame """
data = SummaryCollector.loadAllSummaryData(ert, case, [key])
if not data.empty:
data = data.reset_index()
if any(data.duplicated()):
print("** Warning: The simulation data contains duplicate "
"timestamps. A possible explanation is that your "
"simulation timestep is less than a second.")
data = data.drop_duplicates()
data = data.pivot(index="Date", columns="Realization", values=key)
return data #.dropna()
@staticmethod
def gatherSummaryRefcaseData(ert, key):
refcase = ert.eclConfig().getRefcase()
if refcase is None or not key in refcase:
return DataFrame()
vector = refcase.get_vector(key, report_only=False)
rows = []
for index in range(1, len(vector)):
node = vector[index]
row = {
"Date": node.date,
key: node.value
}
rows.append(row)
data = DataFrame(rows)
data = data.set_index("Date")
return data
@staticmethod
def gatherSummaryHistoryData(ert, case, key):
# create history key
if ":" in key:
head, tail = key.split(":", 2)
key = "%sH:%s" % (head, tail)
else:
key = "%sH" % key
data = PlotDataGatherer.gatherSummaryRefcaseData(ert, key)
if data.empty and case is not None:
data = PlotDataGatherer.gatherSummaryData(ert, case, key)
return data
@staticmethod
def gatherSummaryObservationData(ert, case, key):
if ert.getKeyManager().isKeyWithObservations(key):
return SummaryObservationCollector.loadObservationData(ert, case, [key]).dropna()
else:
return DataFrame()
@staticmethod
def gatherGenDataData(ert, case, key):
""" :rtype: pandas.DataFrame """
key, report_step = key.split("@", 1)
report_step = int(report_step)
try:
data = GenDataCollector.loadGenData(ert, case, key, report_step)
except ValueError:
data = DataFrame()
return data.dropna() # removes all rows that has a NaN
@staticmethod
def gatherGenDataObservationData(ert, case, key_with_report_step):
""" :rtype: pandas.DataFrame """
key, report_step = key_with_report_step.split("@", 1)
report_step = int(report_step)
obs_key = GenDataObservationCollector.getObservationKeyForDataKey(ert, key, report_step)
if obs_key is not None:
obs_data = GenDataObservationCollector.loadGenDataObservations(ert, case, obs_key)
columns = {obs_key: key_with_report_step, "STD_%s" % obs_key: "STD_%s" % key_with_report_step}
obs_data = obs_data.rename(columns=columns)
else:
obs_data = DataFrame()
return obs_data.dropna()
@staticmethod
def gatherCustomKwData(ert, case, key):
""" :rtype: pandas.DataFrame """
data = CustomKWCollector.loadAllCustomKWData(ert, case, [key])[key]
return data
| gpl-3.0 |
bartosh/zipline | zipline/finance/risk/cumulative.py | 3 | 12424 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
check_entry,
choose_treasury
)
from empyrical import (
alpha_beta_aligned,
annual_volatility,
cum_returns,
downside_risk,
max_drawdown,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Cumulative')
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
compound=False)
class RiskMetricsCumulative(object):
"""
:Usage:
Instantiate RiskMetricsCumulative once.
Call update() method on each dt to update the metrics.
"""
METRIC_NAMES = (
'alpha',
'beta',
'sharpe',
'algorithm_volatility',
'benchmark_volatility',
'downside_risk',
'sortino',
)
def __init__(self, sim_params, treasury_curves, trading_calendar,
create_first_day_stats=False):
self.treasury_curves = treasury_curves
self.trading_calendar = trading_calendar
self.start_session = sim_params.start_session
self.end_session = sim_params.end_session
self.sessions = trading_calendar.sessions_in_range(
self.start_session, self.end_session
)
# Hold on to the trading day before the start,
# used for index of the zero return value when forcing returns
# on the first day.
self.day_before_start = self.start_session - self.sessions.freq
last_day = normalize_date(sim_params.end_session)
if last_day not in self.sessions:
last_day = pd.tseries.index.DatetimeIndex(
[last_day]
)
self.sessions = self.sessions.append(last_day)
self.sim_params = sim_params
self.create_first_day_stats = create_first_day_stats
cont_index = self.sessions
self.cont_index = cont_index
self.cont_len = len(self.cont_index)
empty_cont = np.full(self.cont_len, np.nan)
self.algorithm_returns_cont = empty_cont.copy()
self.benchmark_returns_cont = empty_cont.copy()
self.algorithm_cumulative_leverages_cont = empty_cont.copy()
self.mean_returns_cont = empty_cont.copy()
self.annualized_mean_returns_cont = empty_cont.copy()
self.mean_benchmark_returns_cont = empty_cont.copy()
self.annualized_mean_benchmark_returns_cont = empty_cont.copy()
# The returns at a given time are read and reset from the respective
# returns container.
self.algorithm_returns = None
self.benchmark_returns = None
self.mean_returns = None
self.annualized_mean_returns = None
self.mean_benchmark_returns = None
self.annualized_mean_benchmark_returns = None
self.algorithm_cumulative_returns = empty_cont.copy()
self.benchmark_cumulative_returns = empty_cont.copy()
self.algorithm_cumulative_leverages = empty_cont.copy()
self.excess_returns = empty_cont.copy()
self.latest_dt_loc = 0
self.latest_dt = cont_index[0]
self.benchmark_volatility = empty_cont.copy()
self.algorithm_volatility = empty_cont.copy()
self.beta = empty_cont.copy()
self.alpha = empty_cont.copy()
self.sharpe = empty_cont.copy()
self.downside_risk = empty_cont.copy()
self.sortino = empty_cont.copy()
self.drawdowns = empty_cont.copy()
self.max_drawdowns = empty_cont.copy()
self.max_drawdown = 0
self.max_leverages = empty_cont.copy()
self.max_leverage = 0
self.current_max = -np.inf
self.daily_treasury = pd.Series(index=self.sessions)
self.treasury_period_return = np.nan
self.num_trading_days = 0
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
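        # Record the returns and leverage observed on `dt`, then recompute the
        # cumulative metrics (returns, volatility, alpha/beta, Sharpe, Sortino,
        # drawdown and leverage) up to and including this date.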
# Keep track of latest dt for use in to_dict and other methods
# that report current state.
self.latest_dt = dt
dt_loc = self.cont_index.get_loc(dt)
self.latest_dt_loc = dt_loc
self.algorithm_returns_cont[dt_loc] = algorithm_returns
self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
self.num_trading_days = len(self.algorithm_returns)
if self.create_first_day_stats:
if len(self.algorithm_returns) == 1:
self.algorithm_returns = np.append(0.0, self.algorithm_returns)
self.algorithm_cumulative_returns[dt_loc] = cum_returns(
self.algorithm_returns
)[-1]
algo_cumulative_returns_to_date = \
self.algorithm_cumulative_returns[:dt_loc + 1]
self.mean_returns_cont[dt_loc] = \
algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days
self.mean_returns = self.mean_returns_cont[:dt_loc + 1]
self.annualized_mean_returns_cont[dt_loc] = \
self.mean_returns_cont[dt_loc] * 252
self.annualized_mean_returns = \
self.annualized_mean_returns_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.mean_returns) == 1:
self.mean_returns = np.append(0.0, self.mean_returns)
self.annualized_mean_returns = np.append(
0.0, self.annualized_mean_returns)
self.benchmark_returns_cont[dt_loc] = benchmark_returns
self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.benchmark_returns) == 1:
self.benchmark_returns = np.append(0.0, self.benchmark_returns)
self.benchmark_cumulative_returns[dt_loc] = cum_returns(
self.benchmark_returns
)[-1]
benchmark_cumulative_returns_to_date = \
self.benchmark_cumulative_returns[:dt_loc + 1]
self.mean_benchmark_returns_cont[dt_loc] = \
benchmark_cumulative_returns_to_date[dt_loc] / \
self.num_trading_days
        self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt_loc + 1]
self.annualized_mean_benchmark_returns_cont[dt_loc] = \
self.mean_benchmark_returns_cont[dt_loc] * 252
self.annualized_mean_benchmark_returns = \
self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]
self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
self.algorithm_cumulative_leverages = \
self.algorithm_cumulative_leverages_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.algorithm_cumulative_leverages) == 1:
self.algorithm_cumulative_leverages = np.append(
0.0,
self.algorithm_cumulative_leverages)
        if len(self.algorithm_returns) != len(self.benchmark_returns):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_session,
end=self.end_session,
dt=dt
)
raise Exception(message)
self.update_current_max()
self.benchmark_volatility[dt_loc] = annual_volatility(
self.benchmark_returns
)
self.algorithm_volatility[dt_loc] = annual_volatility(
self.algorithm_returns
)
# caching the treasury rates for the minutely case is a
# big speedup, because it avoids searching the treasury
# curves on every minute.
# In both minutely and daily, the daily curve is always used.
treasury_end = dt.replace(hour=0, minute=0)
if np.isnan(self.daily_treasury[treasury_end]):
treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_session,
treasury_end,
self.trading_calendar,
)
self.daily_treasury[treasury_end] = treasury_period_return
self.treasury_period_return = self.daily_treasury[treasury_end]
self.excess_returns[dt_loc] = (
self.algorithm_cumulative_returns[dt_loc] -
self.treasury_period_return)
self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
self.algorithm_returns,
self.benchmark_returns,
)
self.sharpe[dt_loc] = sharpe_ratio(
self.algorithm_returns,
)
self.downside_risk[dt_loc] = downside_risk(
self.algorithm_returns
)
self.sortino[dt_loc] = sortino_ratio(
self.algorithm_returns,
_downside_risk=self.downside_risk[dt_loc]
)
self.max_drawdown = max_drawdown(
self.algorithm_returns
)
self.max_drawdowns[dt_loc] = self.max_drawdown
self.max_leverage = self.calculate_max_leverage()
self.max_leverages[dt_loc] = self.max_leverage
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
        Returns a dict mapping metric names to their current cumulative values.
"""
dt = self.latest_dt
dt_loc = self.latest_dt_loc
period_label = dt.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility':
self.benchmark_volatility[dt_loc],
'algo_volatility':
self.algorithm_volatility[dt_loc],
'treasury_period_return': self.treasury_period_return,
# Though the two following keys say period return,
# they would be more accurately called the cumulative return.
# However, the keys need to stay the same, for now, for backwards
# compatibility with existing consumers.
'algorithm_period_return':
self.algorithm_cumulative_returns[dt_loc],
'benchmark_period_return':
self.benchmark_cumulative_returns[dt_loc],
'beta': self.beta[dt_loc],
'alpha': self.alpha[dt_loc],
'sharpe': self.sharpe[dt_loc],
'sortino': self.sortino[dt_loc],
'excess_return': self.excess_returns[dt_loc],
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: (None if check_entry(k, v) else v)
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
for metric in self.METRIC_NAMES:
value = getattr(self, metric)[-1]
if isinstance(value, list):
if len(value) == 0:
value = np.nan
else:
value = value[-1]
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def update_current_max(self):
if len(self.algorithm_cumulative_returns) == 0:
return
current_cumulative_return = \
self.algorithm_cumulative_returns[self.latest_dt_loc]
if self.current_max < current_cumulative_return:
self.current_max = current_cumulative_return
def calculate_max_leverage(self):
# The leverage is defined as: the gross_exposure/net_liquidation
# gross_exposure = long_exposure + abs(short_exposure)
# net_liquidation = ending_cash + long_exposure + short_exposure
cur_leverage = self.algorithm_cumulative_leverages_cont[
self.latest_dt_loc]
return max(cur_leverage, self.max_leverage)
| apache-2.0 |
pydata/pandas-gbq | tests/unit/test_load.py | 1 | 3944 | # -*- coding: utf-8 -*-
import textwrap
from io import StringIO
from unittest import mock
import numpy
import pandas
import pytest
from pandas_gbq.features import FEATURES
from pandas_gbq import load
def load_method(bqclient):
if FEATURES.bigquery_has_from_dataframe_with_csv:
return bqclient.load_table_from_dataframe
return bqclient.load_table_from_file
def test_encode_chunk_with_unicode():
"""Test that a dataframe containing unicode can be encoded as a file.
See: https://github.com/pydata/pandas-gbq/issues/106
"""
df = pandas.DataFrame(
numpy.random.randn(6, 4), index=range(6), columns=list("ABCD")
)
df["s"] = u"信用卡"
csv_buffer = load.encode_chunk(df)
csv_bytes = csv_buffer.read()
csv_string = csv_bytes.decode("utf-8")
assert u"信用卡" in csv_string
def test_encode_chunk_with_floats():
"""Test that floats in a dataframe are encoded with at most 17 significant
figures.
See: https://github.com/pydata/pandas-gbq/issues/192 and
https://github.com/pydata/pandas-gbq/issues/326
"""
input_csv = textwrap.dedent(
"""01/01/17 23:00,0.14285714285714285,4
01/02/17 22:00,1.05148,3
01/03/17 21:00,1.05153,2
01/04/17 20:00,3.141592653589793,1
01/05/17 19:00,2.0988936657440586e+43,0
"""
)
input_df = pandas.read_csv(
StringIO(input_csv), header=None, float_precision="round_trip"
)
csv_buffer = load.encode_chunk(input_df)
round_trip = pandas.read_csv(
csv_buffer, header=None, float_precision="round_trip"
)
pandas.testing.assert_frame_equal(
round_trip,
input_df,
check_exact=True,
)
def test_encode_chunk_with_newlines():
"""See: https://github.com/pydata/pandas-gbq/issues/180"""
df = pandas.DataFrame({"s": ["abcd", "ef\ngh", "ij\r\nkl"]})
csv_buffer = load.encode_chunk(df)
csv_bytes = csv_buffer.read()
csv_string = csv_bytes.decode("utf-8")
assert "abcd" in csv_string
assert '"ef\ngh"' in csv_string
assert '"ij\r\nkl"' in csv_string
def test_split_dataframe():
df = pandas.DataFrame(numpy.random.randn(6, 4), index=range(6))
chunks = list(load.split_dataframe(df, chunksize=2))
assert len(chunks) == 3
remaining, chunk = chunks[0]
assert remaining == 4
assert len(chunk.index) == 2
def test_encode_chunks_with_chunksize_none():
df = pandas.DataFrame(numpy.random.randn(6, 4), index=range(6))
chunks = list(load.split_dataframe(df))
assert len(chunks) == 1
remaining, chunk = chunks[0]
assert remaining == 0
assert len(chunk.index) == 6
@pytest.mark.parametrize(
["bigquery_has_from_dataframe_with_csv"], [(True,), (False,)]
)
def test_load_chunks_omits_policy_tags(
monkeypatch, mock_bigquery_client, bigquery_has_from_dataframe_with_csv
):
"""Ensure that policyTags are omitted.
    We don't want to change the policyTags via a load job, as this can cause a
    403 error. See: https://github.com/googleapis/python-bigquery/pull/557
"""
import google.cloud.bigquery
monkeypatch.setattr(
type(FEATURES),
"bigquery_has_from_dataframe_with_csv",
mock.PropertyMock(return_value=bigquery_has_from_dataframe_with_csv),
)
df = pandas.DataFrame({"col1": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
schema = {
"fields": [
{"name": "col1", "type": "INT64", "policyTags": ["tag1", "tag2"]}
]
}
_ = list(
load.load_chunks(mock_bigquery_client, df, destination, schema=schema)
)
mock_load = load_method(mock_bigquery_client)
assert mock_load.called
_, kwargs = mock_load.call_args
assert "job_config" in kwargs
sent_field = kwargs["job_config"].schema[0].to_api_repr()
assert "policyTags" not in sent_field
| bsd-3-clause |
deepesch/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
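# Illustrative sketch (hypothetical helper, not part of the scikit-learn API): how the
# Minka MLE helpers above are used -- given the eigenvalue spectrum of centred data,
# _infer_dimension_ returns the rank with the highest log-likelihood (typically 3 here,
# since the data have three strong directions embedded in weak 10-dimensional noise).
def _demo_infer_dimension():
    rng = np.random.RandomState(0)
    X = np.dot(rng.randn(100, 3), rng.randn(3, 10)) + 0.01 * rng.randn(100, 10)
    X -= X.mean(axis=0)
    spectrum = (linalg.svd(X, compute_uv=False) ** 2) / X.shape[0]
    return _infer_dimension_(spectrum, 100, 10)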
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
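# Illustrative sketch (hypothetical helper, not part of scikit-learn): the probabilistic
# PCA methods documented above -- score() is the average per-sample log-likelihood under
# the fitted model, the quantity one would cross-validate to choose n_components, and
# get_covariance() rebuilds the full (n_features, n_features) model covariance.
def _demo_pca_score():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5) * np.array([5., 4., 3., 0.1, 0.1])
    pca = PCA(n_components=3).fit(X)
    return pca.score(X), pca.get_covariance().shape  # (average log-likelihood, (5, 5))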
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If k is not set then all components are stored and the sum of
        explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
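# Illustrative sketch (hypothetical helper, not part of scikit-learn): comparing the
# randomized solver with the exact PCA above -- on data with a quickly decaying spectrum
# the approximate explained-variance ratios are typically close to the exact ones.
def _demo_randomized_vs_exact():
    rng = np.random.RandomState(0)
    X = rng.randn(300, 20) * np.linspace(10., 0.1, 20)
    exact = PCA(n_components=3).fit(X)
    approx = RandomizedPCA(n_components=3, random_state=0).fit(X)
    return exact.explained_variance_ratio_, approx.explained_variance_ratio_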
| bsd-3-clause |
q1ang/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
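# Illustrative helper (hypothetical, not called by the benchmark above) spelling out the
# comment on C: smaller C means stronger L1 regularization and therefore a sparser
# coefficient matrix, which density() reports as the fraction of non-zero weights.
def l1_density_for_C(C_values=(0.01, 0.1, 1.0)):
    densities = []
    for C in C_values:
        clf = LinearSVC(C=C, penalty="l1", dual=False, tol=1e-3).fit(X_train, y_train)
        densities.append((C, density(clf.coef_)))
    return densities  # density is expected to grow with C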
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
hainm/statsmodels | statsmodels/tsa/vector_ar/util.py | 24 | 6383 | """
Miscellaneous utility code for VAR estimation
"""
from statsmodels.compat.python import range, string_types, asbytes
import numpy as np
import scipy.stats as stats
import scipy.linalg as L
import scipy.linalg.decomp as decomp
import statsmodels.tsa.tsatools as tsa
from scipy.linalg import cholesky
#-------------------------------------------------------------------------------
# Auxiliary functions for estimation
def get_var_endog(y, lags, trend='c', has_constant='skip'):
"""
Make predictor matrix for VAR(p) process
Z := (Z_0, ..., Z_T).T (T x Kp)
Z_t = [1 y_t y_{t-1} ... y_{t - p + 1}] (Kp x 1)
Ref: Lutkepohl p.70 (transposed)
has_constant can be 'raise', 'add', or 'skip'. See add_constant.
"""
nobs = len(y)
# Ravel C order, need to put in descending order
Z = np.array([y[t-lags : t][::-1].ravel() for t in range(lags, nobs)])
# Add constant, trend, etc.
if trend != 'nc':
Z = tsa.add_trend(Z, prepend=True, trend=trend,
has_constant=has_constant)
return Z
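# Illustrative sketch (hypothetical helper, not part of statsmodels): the predictor
# matrix documented above -- for nobs=5 observations of K=2 series with p=2 lags and a
# constant, Z has shape (nobs - p, 1 + K * p) = (3, 5) and each row holds
# [1, y_{t-1}, y_{t-2}] flattened in descending lag order.
def _demo_get_var_endog():
    y = np.arange(10.0).reshape(5, 2)
    return get_var_endog(y, 2, trend='c').shape  # (3, 5)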
def get_trendorder(trend='c'):
# Handle constant, etc.
if trend == 'c':
trendorder = 1
elif trend == 'nc':
trendorder = 0
elif trend == 'ct':
trendorder = 2
elif trend == 'ctt':
trendorder = 3
return trendorder
def make_lag_names(names, lag_order, trendorder=1):
"""
Produce list of lag-variable names. Constant / trends go at the beginning
Examples
--------
>>> make_lag_names(['foo', 'bar'], 2, 1)
['const', 'L1.foo', 'L1.bar', 'L2.foo', 'L2.bar']
"""
lag_names = []
if isinstance(names, string_types):
names = [names]
# take care of lagged endogenous names
for i in range(1, lag_order + 1):
for name in names:
if not isinstance(name, string_types):
name = str(name) # will need consistent unicode handling
lag_names.append('L'+str(i)+'.'+name)
# handle the constant name
if trendorder != 0:
lag_names.insert(0, 'const')
if trendorder > 1:
lag_names.insert(0, 'trend')
if trendorder > 2:
lag_names.insert(0, 'trend**2')
return lag_names
def comp_matrix(coefs):
"""
    Return companion matrix for the VAR(1) representation of a VAR(p) process
(companion form)
A = [A_1 A_2 ... A_p-1 A_p
I_K 0 0 0
0 I_K ... 0 0
0 ... I_K 0]
"""
p, k, k2 = coefs.shape
assert(k == k2)
kp = k * p
result = np.zeros((kp, kp))
result[:k] = np.concatenate(coefs, axis=1)
# Set I_K matrices
if p > 1:
result[np.arange(k, kp), np.arange(kp-k)] = 1
return result
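# Illustrative sketch (hypothetical helper, not part of statsmodels): the companion form
# documented above -- for K=2 variables and p=2 lags the result is a 4x4 matrix with
# [A_1 A_2] in the top K rows and an identity block on the subdiagonal.
def _demo_comp_matrix():
    coefs = np.array([[[0.5, 0.1], [0.0, 0.4]],   # A_1
                      [[0.2, 0.0], [0.0, 0.1]]])  # A_2
    return comp_matrix(coefs)  # shape (4, 4)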
#-------------------------------------------------------------------------------
# Miscellaneous stuff
def parse_lutkepohl_data(path): # pragma: no cover
"""
Parse data files from Lutkepohl (2005) book
Source for data files: www.jmulti.de
"""
from collections import deque
from datetime import datetime
import pandas
import pandas.core.datetools as dt
import re
regex = re.compile(asbytes('<(.*) (\w)([\d]+)>.*'))
lines = deque(open(path, 'rb'))
to_skip = 0
while asbytes('*/') not in lines.popleft():
#while '*/' not in lines.popleft():
to_skip += 1
while True:
to_skip += 1
line = lines.popleft()
m = regex.match(line)
if m:
year, freq, start_point = m.groups()
break
data = np.genfromtxt(path, names=True, skip_header=to_skip+1)
n = len(data)
# generate the corresponding date range (using pandas for now)
start_point = int(start_point)
year = int(year)
offsets = {
asbytes('Q') : dt.BQuarterEnd(),
asbytes('M') : dt.BMonthEnd(),
asbytes('A') : dt.BYearEnd()
}
# create an instance
offset = offsets[freq]
inc = offset * (start_point - 1)
start_date = offset.rollforward(datetime(year, 1, 1)) + inc
offset = offsets[freq]
from pandas import DatetimeIndex # pylint: disable=E0611
date_range = DatetimeIndex(start=start_date, freq=offset, periods=n)
return data, date_range
def get_logdet(m):
from statsmodels.tools.linalg import logdet_symm
return logdet_symm(m)
get_logdet = np.deprecate(get_logdet,
"statsmodels.tsa.vector_ar.util.get_logdet",
"statsmodels.tools.linalg.logdet_symm",
"get_logdet is deprecated and will be removed in "
"0.8.0")
def norm_signif_level(alpha=0.05):
return stats.norm.ppf(1 - alpha / 2)
def acf_to_acorr(acf):
diag = np.diag(acf[0])
# numpy broadcasting sufficient
return acf / np.sqrt(np.outer(diag, diag))
def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
"""
Simulate simple VAR(p) process with known coefficients, intercept, white
noise covariance, etc.
"""
if seed is not None:
np.random.seed(seed=seed)
from numpy.random import multivariate_normal as rmvnorm
p, k, k = coefs.shape
ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
result = np.zeros((steps, k))
result[p:] = intercept + ugen[p:]
# add in AR terms
for t in range(p, steps):
ygen = result[t]
for j in range(p):
ygen += np.dot(coefs[j], result[t-j-1])
return result
def get_index(lst, name):
try:
result = lst.index(name)
except Exception:
if not isinstance(name, int):
raise
result = name
return result
#method used repeatedly in Sims-Zha error bands
def eigval_decomp(sym_array):
"""
Returns
-------
W: array of eigenvectors
eigva: list of eigenvalues
k: largest eigenvector
"""
#check if symmetric, do not include shock period
eigva, W = decomp.eig(sym_array, left=True, right=False)
k = np.argmax(eigva)
return W, eigva, k
def vech(A):
"""
Simple vech operator
Returns
-------
vechvec: vector of all elements on and below diagonal
"""
length=A.shape[1]
vechvec=[]
for i in range(length):
b=i
while b < length:
vechvec.append(A[b,i])
b=b+1
vechvec=np.asarray(vechvec)
return vechvec
| bsd-3-clause |
erdc-cm/air-water-vv | 2d/benchmarks/dambreak_Colagrossi/helpers.py | 1 | 2100 | def CreateFig():
from tables import openFile
archive = openFile('dambreak_Colagrossi_p.h5','r')
import dambreak_Colagrossi
import dambreak_Colagrossi_so
import matplotlib.tri as mtri
from matplotlib import pyplot as plt
import numpy as np
domain = dambreak_Colagrossi.domain
domain.L = dambreak_Colagrossi.tank_dim
domain.x = (0.,0.,0.)
nodes = archive.getNode("/nodesSpatial_Domain0")
x=nodes[:,0]
y=nodes[:,1]
elements = archive.getNode("/elementsSpatial_Domain0")
triang = mtri.Triangulation(x, y, elements)
xg = np.linspace(0, domain.L[0], 20)
yg = np.linspace(0, domain.L[1], 20)
xi, yi = np.meshgrid(xg,yg)
plt.figure()
for it,t in enumerate(dambreak_Colagrossi_so.tnList[:]):
phi = archive.getNode("/phi_t"+`it`)
vof = archive.getNode("/vof_t"+`it`)
wvof = np.ones(vof.shape,'d')
wvof -= vof
u = archive.getNode("/u_t"+`it`)
v = archive.getNode("/v_t"+`it`)
plt.clf()
plt.xlabel(r'z[m]')
plt.ylabel(r'x[m]')
colors = ['b','g','r','c','m','y','k','w']
plt.xlim(domain.x[0]-0.1*domain.L[0],domain.x[0]+domain.L[0]+0.1*domain.L[0])
for si,s in enumerate(domain.segments):
plt.plot([domain.vertices[s[0]][0],
domain.vertices[s[1]][0]],
[domain.vertices[s[0]][1],
domain.vertices[s[1]][1]],
color=colors[domain.segmentFlags[si]-1],
linewidth=2,
marker='o')
plt.tricontourf(x,y,elements,wvof*np.sqrt(u[:]**2 + v[:]**2))
plt.tricontour(x,y,elements,phi,[0], linewidth=4)
u_interp_lin = mtri.LinearTriInterpolator(triang, u[:])
v_interp_lin = mtri.LinearTriInterpolator(triang, v[:])
u_lin = u_interp_lin(xi, yi)
v_lin = v_interp_lin(xi, yi)
plt.streamplot(xg, yg, u_lin, v_lin,color='k')
plt.title('T=%2.2f' % (t,))
plt.axis('equal')
plt.xlim((0,domain.L[0]))
plt.savefig('phi%4.4d.png' % (it,))
| mit |
anntzer/scikit-learn | sklearn/preprocessing/tests/test_common.py | 14 | 6817 | import warnings
import pytest
import numpy as np
from scipy import sparse
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.base import clone
from sklearn.preprocessing import maxabs_scale
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import scale
from sklearn.preprocessing import power_transform
from sklearn.preprocessing import quantile_transform
from sklearn.preprocessing import robust_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import RobustScaler
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
iris = load_iris()
def _get_valid_samples_by_column(X, col):
"""Get non NaN samples in column of X"""
return X[:, [col]][~np.isnan(X[:, col])]
@pytest.mark.parametrize(
"est, func, support_sparse, strictly_positive, omit_kwargs",
[(MaxAbsScaler(), maxabs_scale, True, False, []),
(MinMaxScaler(), minmax_scale, False, False, ['clip']),
(StandardScaler(), scale, False, False, []),
(StandardScaler(with_mean=False), scale, True, False, []),
(PowerTransformer('yeo-johnson'), power_transform, False, False, []),
(PowerTransformer('box-cox'), power_transform, False, True, []),
(QuantileTransformer(n_quantiles=10), quantile_transform, True, False,
[]),
(RobustScaler(), robust_scale, False, False, []),
(RobustScaler(with_centering=False), robust_scale, True, False, [])]
)
def test_missing_value_handling(est, func, support_sparse, strictly_positive,
omit_kwargs):
    # check that the preprocessing method lets NaN pass through
rng = np.random.RandomState(42)
X = iris.data.copy()
n_missing = 50
X[rng.randint(X.shape[0], size=n_missing),
rng.randint(X.shape[1], size=n_missing)] = np.nan
if strictly_positive:
X += np.nanmin(X) + 0.1
X_train, X_test = train_test_split(X, random_state=1)
# sanity check
assert not np.all(np.isnan(X_train), axis=0).any()
assert np.any(np.isnan(X_train), axis=0).all()
assert np.any(np.isnan(X_test), axis=0).all()
X_test[:, 0] = np.nan # make sure this boundary case is tested
with pytest.warns(None) as records:
Xt = est.fit(X_train).transform(X_test)
# ensure no warnings are raised
assert len(records) == 0
# missing values should still be missing, and only them
assert_array_equal(np.isnan(Xt), np.isnan(X_test))
# check that the function leads to the same results as the class
with pytest.warns(None) as records:
Xt_class = est.transform(X_train)
assert len(records) == 0
kwargs = est.get_params()
# remove the parameters which should be omitted because they
# are not defined in the sister function of the preprocessing class
for kwarg in omit_kwargs:
_ = kwargs.pop(kwarg)
Xt_func = func(X_train, **kwargs)
assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class))
assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)])
# check that the inverse transform keep NaN
Xt_inv = est.inverse_transform(Xt)
assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test))
# FIXME: we can introduce equal_nan=True in recent version of numpy.
    # For the moment we just check that non-NaN values are almost equal.
assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)])
for i in range(X.shape[1]):
# train only on non-NaN
est.fit(_get_valid_samples_by_column(X_train, i))
# check transforming with NaN works even when training without NaN
with pytest.warns(None) as records:
Xt_col = est.transform(X_test[:, [i]])
assert len(records) == 0
assert_allclose(Xt_col, Xt[:, [i]])
# check non-NaN is handled as before - the 1st column is all nan
if not np.isnan(X_test[:, i]).all():
Xt_col_nonan = est.transform(
_get_valid_samples_by_column(X_test, i))
assert_array_equal(Xt_col_nonan,
Xt_col[~np.isnan(Xt_col.squeeze())])
if support_sparse:
est_dense = clone(est)
est_sparse = clone(est)
with pytest.warns(None) as records:
Xt_dense = est_dense.fit(X_train).transform(X_test)
Xt_inv_dense = est_dense.inverse_transform(Xt_dense)
assert len(records) == 0
for sparse_constructor in (sparse.csr_matrix, sparse.csc_matrix,
sparse.bsr_matrix, sparse.coo_matrix,
sparse.dia_matrix, sparse.dok_matrix,
sparse.lil_matrix):
# check that the dense and sparse inputs lead to the same results
# precompute the matrix to avoid catching side warnings
X_train_sp = sparse_constructor(X_train)
X_test_sp = sparse_constructor(X_test)
with pytest.warns(None) as records:
warnings.simplefilter('ignore', PendingDeprecationWarning)
Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp)
assert len(records) == 0
assert_allclose(Xt_sp.A, Xt_dense)
with pytest.warns(None) as records:
warnings.simplefilter('ignore', PendingDeprecationWarning)
Xt_inv_sp = est_sparse.inverse_transform(Xt_sp)
assert len(records) == 0
assert_allclose(Xt_inv_sp.A, Xt_inv_dense)
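# Illustrative sketch (hypothetical helper, not a test): the behaviour verified above --
# a fitted scaler ignores NaN when computing its statistics and propagates NaN untouched
# through transform, so missing values stay missing.
def _sketch_nan_passthrough():
    X = np.array([[1.0, np.nan], [2.0, 4.0], [3.0, 6.0]])
    Xt = StandardScaler().fit_transform(X)
    return np.isnan(Xt)  # True only at the original NaN position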
@pytest.mark.parametrize(
"est, func",
[(MaxAbsScaler(), maxabs_scale),
(MinMaxScaler(), minmax_scale),
(StandardScaler(), scale),
(StandardScaler(with_mean=False), scale),
(PowerTransformer('yeo-johnson'), power_transform),
(PowerTransformer('box-cox'), power_transform,),
(QuantileTransformer(n_quantiles=3), quantile_transform),
(RobustScaler(), robust_scale),
(RobustScaler(with_centering=False), robust_scale)]
)
def test_missing_value_pandas_na_support(est, func):
# Test pandas IntegerArray with pd.NA
pd = pytest.importorskip('pandas', minversion="1.0")
X = np.array([[1, 2, 3, np.nan, np.nan, 4, 5, 1],
[np.nan, np.nan, 8, 4, 6, np.nan, np.nan, 8],
[1, 2, 3, 4, 5, 6, 7, 8]]).T
# Creates dataframe with IntegerArrays with pd.NA
X_df = pd.DataFrame(X, dtype="Int16", columns=['a', 'b', 'c'])
X_df['c'] = X_df['c'].astype('int')
X_trans = est.fit_transform(X)
X_df_trans = est.fit_transform(X_df)
assert_allclose(X_trans, X_df_trans)
| bsd-3-clause |
JanetMatsen/bacteriopop | network_construction.py | 1 | 9581 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import pylab
import pandas as pd
import seaborn as sns
def create_one_graph_using_networkx(adj, nodes, edge_threshold):
"""
    This function creates a graph using the adjacency matrix adj and the
list of nodes.
:param adj: a numpy array representing the adjacency matrix
:param nodes: a list representing the nodes in the graph
:param edge_threshold: the threshold on the elements of adjacency
    matrix adj for a pair to be considered as an edge
:return: the graph in networkX format
"""
    # create an empty graph
g = nx.Graph()
# add edges to the graph
n = len(nodes)
for node1 in range(n):
for node2 in range(n):
if node1 == node2 or abs(adj[node1][node2]) >= edge_threshold:
g.add_edge(node1, node2)
g[node1][node2]['weight'] = adj[node1][node2]
return g
def create_all_graphs(mappings, nodes_list, edge_threshold=1e-10):
"""
Transforms the adjacency matrix into the graph for all the instances
:param mappings:
:param nodes_list:
:param edge_threshold:
"""
graphs = {}
for key in mappings.keys():
graphs[key] = create_one_graph_using_networkx(mappings[key],
nodes_list[key],
edge_threshold)
return graphs
def save_graph(graph, file_name):
"""
This function saves the graph into a file called file_name
:param graph:
:param file_name:
"""
    # initialize Figure
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
pos = nx.spring_layout(graph)
nx.draw_networkx_nodes(graph, pos)
nx.draw_networkx_edges(graph, pos)
nx.draw_networkx_labels(graph, pos)
cut = 1.00
xmax = cut * max(xx for xx, yy in pos.values())
ymax = cut * max(yy for xx, yy in pos.values())
plt.xlim(0, xmax)
plt.ylim(0, ymax)
plt.savefig('plots/' + file_name, bbox_inches="tight")
pylab.close()
del fig
def reduce_adjacency_matrix(adj, nodes, edge_threshold):
"""
This function removes some rows and columns of the adjacency matrix
according to their correlation with other elements in the adjacency
matrix and returns a new (smaller) numpy array for adjacency and the
new list of nodes considered in the matrix.
:param adj: adjacency matrix
:param nodes: name of nodes (row and column names)
:param edge_threshold: minimum magnitude to search for and include
"""
n = len(nodes)
new_nodes_index = []
kept_nodes_names = []
# todo: return the names of the reduced data's rows, columns instead of
# the index.
# loop over the nodes (which is both a row and column) and look for
# interactions that have greater magnitude than the specified
# edge_threshold
for node1 in range(n):
remove = True
for node2 in range(n):
# look for whether this pair has a significant interaction.
if abs(adj[node1][node2]) > edge_threshold or \
abs(adj[node2][node1]) > edge_threshold:
remove = False
if not remove:
new_nodes_index.append(node1)
kept_nodes_names.append(nodes[node1])
new_adj = np.zeros([len(new_nodes_index), len(new_nodes_index)])
for i, node1 in enumerate(new_nodes_index):
for j, node2 in enumerate(new_nodes_index):
new_adj[i][j] = adj[node1][node2]
return new_adj, kept_nodes_names
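# Illustrative sketch (hypothetical helper, not used by the pipeline): the reduction
# step above with a threshold of 0.5 -- only nodes involved in at least one interaction
# stronger than the threshold ('a' and 'c' here) survive, giving a 2x2 adjacency matrix.
def _demo_reduce_adjacency_matrix():
    adj = np.array([[0.0, 0.1, 0.9],
                    [0.1, 0.0, 0.2],
                    [0.9, 0.2, 0.0]])
    return reduce_adjacency_matrix(adj, ['a', 'b', 'c'], edge_threshold=0.5)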
def reduce_all_adjacency_matrixes_in_dict(adjacency_dict, node_dict,
edge_threshold):
"""
    This function iterates over all replicates in the adjacency_dict,
    which includes 4 high-O2 and 4 low-O2 samples, and constructs a new
    adjacency matrix for each based on edge_threshold. edge_threshold
    represents the minimum amount of interaction between bacteria needed
    to be considered as an edge in the network of bacteria.
"""
reduced_array_dict = {}
reduced_node_dict = {}
for key in adjacency_dict.keys():
reduced_array_dict[key], reduced_node_dict[key] = \
reduce_adjacency_matrix(adjacency_dict[key],
node_dict[key],
edge_threshold)
return reduced_array_dict, reduced_node_dict
def aggregate_adjacency_matrices(list_of_dfs):
"""
    This function takes the list of all adjacency matrices found for each
    high/low replicate and calculates the mean, standard deviation and
    signal-to-noise ratio over the 4 replicates in the high/low groups.
"""
# Generalized aggregator. Will write a wrapper that individually makes
# one for each High/Low O2 condition.
# Returns yet another dictionary! (ha ha)
# Use Pandas panel to gather the replicates, and to make summary
# dataframes of the element-by-element averages, standard deviations,
# and signal-to-noise.
# Note that we are using 0, 1, 2... for keys in the Panel object. We
# could use the descriptive tuples, but there is currently no advantage.
p = pd.Panel(data={n: df for n, df in enumerate(list_of_dfs)})
# Use this Panel object to make summary statistics.
summary_df_dict = {}
summary_df_dict['mean'] = p.mean(axis=0)
summary_df_dict['standard deviation'] = p.std(axis=0)
summary_df_dict['signal to noise'] = summary_df_dict['mean']/summary_df_dict['standard deviation']
# to get signal to noise, we need to make a panel of these new dataframes.
p2 = pd.Panel(data={n: df for n, df in
enumerate([summary_df_dict['mean'],
summary_df_dict['standard deviation'],
summary_df_dict['signal to noise']])})
return summary_df_dict
def summarize_replicate_adjacency_matrices(result_dict):
"""
This function separates the high and low oxygen results before aggregating.
The output is a dictionary of dictionaries.
"""
high_oxygen_dfs = []
low_oxygen_dfs = []
# loop over the results dict. The keys are tuples like ("Low",
# 1) indicating their low/high oxygen content and replicate number. Use
# the "Low" or "High" string to sort before aggregation.
for oxy_rep_tuple in result_dict.keys():
if oxy_rep_tuple[0] == "Low":
low_oxygen_dfs.append(result_dict[oxy_rep_tuple])
else:
high_oxygen_dfs.append(result_dict[oxy_rep_tuple])
# Now we can pass these lists of dataframes to
# aggregate_adjacency_matrices(), which calculates the means and
# standard deviations. This will help us find the important and
# reproducible correlations/effects.
low_oxy_summary = aggregate_adjacency_matrices(low_oxygen_dfs)
high_oxy_summary = aggregate_adjacency_matrices(high_oxygen_dfs)
# create a dictionary to hold each dictionary. Now we are wishing we
# had classes!
return {"Low": low_oxy_summary, "high": high_oxy_summary}
def plot_heatmap(dataframe, title=None, file_name=None, file_type='pdf',
width=10, height=10):
"""
Plot a heat map of any data frame.
:param dataframe: data to plot as a heat map
:param title: optional title
:param file_name: optional filename
    :param file_type: optional file type
    :param width: figure width (inches)
    :param height: figure height (inches)
    :return: a seaborn heat map, which was saved if file_name was provided.
"""
ax = plt.axes()
# generate a seaborn heatmap on the axis ax
hmp = sns.heatmap(dataframe, ax=ax)
# add a title if it was passed to the function
if title:
ax.set_title(title)
# adjust the figure size
hmp.figure.set_figwidth(width)
hmp.figure.set_figheight(height)
if file_name:
hmp.figure.savefig(file_name + file_type, bbox_inches='tight')
return hmp
def o2_rep_tuple_to_string(o2_rep_tuple):
"""
    This function creates a string description for a sample
    called o2_rep_tuple.
"""
return str(o2_rep_tuple[0]) + '_oxygen_week_' + str(o2_rep_tuple[1])
def plot_all_adjacency_heatmaps(mappings_in_pandas):
"""
    Plot and save the heat maps of the matrices given as pandas data frames.
:param mappings_in_pandas:
"""
for key in mappings_in_pandas:
file_name = 'plots/' + o2_rep_tuple_to_string(key)
title = str(key[0])+' oxygen, week '+str(key[1])
plot_heatmap(mappings_in_pandas[key], title, file_name,
file_type='.pdf')
plt.clf()
def plot_aggregated_adjacency_heatmaps(mappings_in_pandas, dtype='Mean'):
"""
    Plot and save the heat maps of the matrices given as pandas data frames.
    :param mappings_in_pandas: a dictionary containing two elements,
including information for 'High' and 'Low' replicates
:param dtype: the type of matrices to be plotted such as Mean, STD, SNR
"""
# Todo: simplify by using plot_heatmap()
for key in mappings_in_pandas:
file_name = 'plots/'+key+"_oxygen_replicates_" + dtype
ax = plt.axes()
hmp = sns.heatmap(mappings_in_pandas[key], ax=ax)
ax.set_title(key + ' oxygen replicates ' + dtype)
hmp.figure.set_figwidth(10)
hmp.figure.set_figheight(10)
hmp.figure
hmp.figure.savefig(file_name, bbox_inches='tight')
plt.clf()
| apache-2.0 |
timvandermeij/mobile-radio-tomography | plan_reconstruct.py | 3 | 4348 | # Core imports
import datetime
import os
import sys
import time
import json
# matplotlib imports
import matplotlib
try:
# Make it possible to run matplotlib in displayless (console-only) mode
matplotlib.use('Agg' if 'DISPLAY' not in os.environ or os.environ['DISPLAY'] == '' else matplotlib.get_backend())
except ValueError as e:
raise ImportError("Could not load matplotlib backend: {}".format(e.message))
finally:
import matplotlib.pyplot as plt
# Package imports
from __init__ import __package__
from core.Import_Manager import Import_Manager
from core.Thread_Manager import Thread_Manager
from planning.Runner import Planning_Runner
from settings import Arguments
def do_plot(name):
"""
Finish plotting by saving or showing the plot.
"""
backend = matplotlib.get_backend()
if backend.lower() == 'agg' or 'SAVE_PATH' in os.environ:
path = os.environ['SAVE_PATH'] if 'SAVE_PATH' in os.environ else '.'
filename = "{}/{}".format(path, name)
plt.savefig(filename)
print("Saved plot as {}".format(filename))
else:
print("Close the plot window to continue.")
try:
plt.show()
except StandardError:
            # Sometimes things go wrong in the plot display (such as when
            # clicking the close button too fast), so ignore those errors.
pass
def do_data(name, data):
"""
Handle data output.
Either write a JSON file with the given `name` for the `data` object, or
print the data to the standard output.
"""
if matplotlib.get_backend() == 'Agg' or 'SAVE_PATH' in os.environ:
path = os.environ['SAVE_PATH'] if 'SAVE_PATH' in os.environ else '.'
filename = "{}/{}.json".format(path, name)
with open(filename, 'wb') as f:
json.dump(data, f)
else:
print(data)
def iteration_callback(algorithm, data):
t = data["iteration"]
cur_time = data["cur_time"]
speed = t/float(cur_time)
print("Iteration {} ({} sec, {} it/s)".format(t, cur_time, speed))
if speed != 0.0:
rate = (algorithm.t_max - t) / speed
end_time = datetime.datetime.now() + datetime.timedelta(seconds=rate)
print("{} seconds remaining, ETA: {}".format(rate, end_time))
Feasible = data["feasible"]
Objectives = data["objectives"]
scores = list(sorted((i for i in range(algorithm.mu) if Feasible[i]), key=lambda i: Objectives[i]))
if scores:
idx = scores[len(scores)/2]
print("Current knee point objectives: {}".format(Objectives[idx]))
print("Infeasible count: {}".format(algorithm.mu - sum(Feasible)))
def main(argv):
# Initialize, read parameters from input and set up problems
stamp = int(time.time())
thread_manager = Thread_Manager()
import_manager = Import_Manager()
arguments = Arguments("settings.json", argv)
runner = Planning_Runner(arguments, thread_manager, import_manager,
iteration_callback)
arguments.check_help()
t_max = runner.get_iteration_limit()
size = runner.get_population_size()
print("Settings: Algorithm {}, mu={}, t_max={}".format(runner.algorithm.get_name(), size, t_max))
print("Steps: {}".format(runner.algorithm.steps))
indices = runner.start()
# Show feasible solutions in a sorted manner.
if len(indices) == 0:
print("No feasible solutions found after {} iterations!".format(t_max))
return
print("Search variables an objective values for feasible solutions:")
# If we have fewer nondominated solutions than the total number of
# individuals, then only show the nondominated ones. Otherwise, just show
# all feasible solutions.
c = 0
for i in indices:
c += 1
positions, unsnappable = runner.get_positions_plot(i, c, len(indices))
if positions.size == 0:
continue
print("{}. {} ({})".format(i, runner.get_objectives(i), unsnappable))
do_data("positions-{}-{}".format(stamp, c), positions.tolist())
do_plot("display-{}-{}.eps".format(stamp, c))
# Plot the pareto front between the two objectives.
print("Pareto front after t={}".format(t_max))
runner.make_pareto_plot()
do_plot("front-{}.eps".format(stamp))
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 |
joshloyal/pydata-amazon-products | amazon_products/models/keras_model.py | 1 | 1675 | import os
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from amazon_products.resnet import ResNetVectorizer
data_dir = 'amazon_products_data'
image_dir = os.path.join(data_dir, 'images')
cache_dir = 'resnet50'
# training
df = pd.read_csv(os.path.join(data_dir, 'amazon_products_train.csv'))
train_image_list = df['image_file'].values
train_categories = df['product_category'].values
# dev
df = pd.read_csv(os.path.join(data_dir, 'amazon_products_dev.csv'))
dev_image_list = df['image_file'].values
dev_categories = df['product_category'].values
# encode labels (binary labels)
encoder = LabelBinarizer()
train_labels = encoder.fit_transform(train_categories)
dev_labels = encoder.transform(dev_categories)
# get features from a pre-trained resnet model
vec = ResNetVectorizer(batch_size=500,
image_dir=image_dir,
use_cache=True,
cache_dir=cache_dir)
train_features = vec.transform(train_image_list)
dev_features = vec.transform(dev_image_list)
# fine-tune the last layer
input_features = Input(shape=train_features.shape[1:], dtype='float32')
x = Dense(256, activation='relu')(input_features)
x = Dropout(0.5)(x)
predictions = Dense(encoder.classes_.shape[0], activation='softmax')(x)
model = Model(inputs=[input_features], outputs=[predictions])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_features, train_labels,
nb_epoch=50, batch_size=32,
validation_data=[dev_features, dev_labels])
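# Illustrative follow-up sketch (hypothetical, not part of the original script): class
# labels for the dev set can be recovered by inverting the LabelBinarizer on the
# model's softmax output, since inverse_transform takes the argmax for multiclass data.
dev_predictions = encoder.inverse_transform(model.predict(dev_features))
print(dev_predictions[:5])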
| mit |
spragunr/echolocation | align_audio.py | 1 | 3434 |
import numpy as np
import h5py
import sys
from scipy import signal
import os
SAMPLE_RATE = 44100
def align_clip(clip, trigger_freq=16000, threshold=1250,
box_width=50, percent_required=.5, offset=200):
w = 5.0
s = 1.0
M = np.floor(2. * w * s * SAMPLE_RATE / trigger_freq)
wavelet = signal.morlet(M, w=5.0, s=1.0, complete=True)
resp = np.abs(signal.convolve(clip[:,0], wavelet, mode='same'))
box = np.array([1.0 for _ in range(box_width)])
above = resp > threshold
counts = signal.convolve(above, box,mode='valid')
counts = np.append(counts, np.zeros(box.shape[0] - 1))
candidates = np.logical_and(above, counts > (percent_required * box_width))
if np.where(candidates)[0].size > 0:
start_index = max(0, np.where(candidates)[0][0] - offset)
else:
start_index = 0
print "BAD SAMPLE?"
result = np.zeros(clip.shape, dtype=clip.dtype)
result[0:clip.shape[0] - start_index, :] = clip[start_index::, :]
return result
def compress_h5(file_name):
data = h5py.File(file_name, 'r')
compressed = h5py.File(file_name+".h5", 'w')
for item in data.items():
dset = compressed.create_dataset(item[0], data[item[0]].shape,
dtype=data[item[0]].dtype,
compression="lzf")
dset[...] = data[item[0]][...]
compressed.close()
def align_h5(file_name):
""" Add alligned audio data to an existing h5 file. """
data = h5py.File(file_name, 'r+')
if 'audio_aligned' in data:
del data['audio_aligned']
dset = data.create_dataset("audio_aligned", data['audio'].shape,
dtype=data['audio'].dtype)
for i in range(data['audio'].shape[0]):
dset[i, ...] = align_clip(data['audio'][i,...])
data.close()
def demo():
""" show example of an alignment """
import matplotlib.pyplot as plt
data = h5py.File(sys.argv[1], 'r')
# Sample rate and desired cutoff frequencies (in Hz).
clip = data['audio'][100, ...]
plt.subplot(4,1,1)
f, t, Sxx = signal.spectrogram(clip[:,0], 44100,
nperseg=256,
noverlap =255)
plt.pcolormesh(t, f, np.log(1 + Sxx))
plt.axis('tight')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.subplot(4,1,2)
plt.plot(clip[:,0])
plt.axis('tight')
plt.subplot(4,1,3)
aligned = align_clip(clip)
f, t, Sxx = signal.spectrogram(aligned[:,0], 44100,
nperseg=256,
noverlap=255)
plt.pcolormesh(t, f, np.log(1 + Sxx))
plt.axis('tight')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.subplot(4,1,4)
plt.plot(aligned[:,0])
plt.axis('tight')
plt.show()
def compress_all():
files = [#'isat143a', 'isat143b',
'isat231a', 'isat231b',
'isat243a', 'isat243b', 'isat246a', 'isat246b',
'isat246c', 'isat248a', 'isat248b', 'isat248c',
'isat250a', 'isat250b', 'roboA', 'roboB', 'roboC',
'roboD', 'roboE']
for f in files:
print f
compress_h5(f)
os.remove(f)
if __name__ == "__main__":
#demo()
#compress_all()
#compress_h5(sys.argv[1])
align_h5(sys.argv[1])
| mit |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/mu_manyaxes.py | 1 | 2354 | from kapteyn import maputils
from matplotlib import pyplot as plt
# 1. Read the header
fitsobj = maputils.FITSimage("manyaxes.fits")
# 2. Create a Matplotlib Figure and Axes instance
figsize=fitsobj.get_figsize(ysize=12, xsize=11, cm=True)
fig = plt.figure(figsize=figsize)
frame = fig.add_subplot(1,1,1)
# 3. Create a graticule
fitsobj.set_imageaxes('freq','pol')
mplim = fitsobj.Annotatedimage(frame)
grat = mplim.Graticule(starty=1000, deltay=10)
# 4. Show the calculated world coordinates along y-axis
print("The world coordinates along the y-axis:", grat.ystarts)
# 5. Show header information in attributes of the Projection object
# The projection object of a graticule is attribute 'gmap'
print("CRVAL, CDELT from header:", grat.gmap.crval, grat.gmap.cdelt)
# 6. Set a number of properties of the graticules and plot axes
grat.setp_tick(plotaxis="bottom",
fun=lambda x: x/1.0e9, fmt="%.4f",
rotation=-30 )
grat.setp_axislabel("bottom", label="Frequency (GHz)")
grat.setp_gratline(wcsaxis=0, position=grat.gmap.crval[0],
tol=0.5*grat.gmap.cdelt[0], color='r')
grat.setp_ticklabel(plotaxis="left", position=1000, color='m', fmt="I")
grat.setp_ticklabel(plotaxis="left", position=1010, color='b', fmt="Q")
grat.setp_ticklabel(plotaxis="left", position=1020, color='r', fmt="U")
grat.setp_ticklabel(plotaxis="left", position=1030, color='g', fmt="V")
grat.setp_axislabel("left", label="Stokes parameters")
# 7. Set a title for this frame
title = r"""Polarization as function of frequency at:
$(\alpha_0,\delta_0) = (121^o,53^o)$"""
t = frame.set_title(title, color='#006400', y=1.01, linespacing=1.4)
# 8. Add labels inside plot
inlabs = grat.Insidelabels(wcsaxis=0, constval=1015,
deltapx=-0.15, rotation=90,
fontsize=10, color='r',
fun=lambda x: x*1e-9, fmt="%.4f.10^9")
w = grat.gmap.crval[0] + 0.2*grat.gmap.cdelt[0]
cv = grat.gmap.crval[1]
# Print without any formatting
inlab2 = grat.Insidelabels(wcsaxis=0, world=w, constval=cv,
deltapy=0.1, rotation=20,
fontsize=10, color='c')
pixel = grat.gmap.topixel((w,grat.gmap.crval[1]))
frame.plot( (pixel[0],), (pixel[1],), 'o', color='red' )
# 9. Plot the objects
maputils.showall() | bsd-3-clause |
kkk669/mxnet | python/mxnet/model.py | 17 | 39894 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
    from sklearn.base import BaseEstimator
    BASE_ESTIMATOR = BaseEstimator
    SKLEARN_INSTALLED = True
except ImportError:
    SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
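# --- Illustrative sketch (added for clarity, not part of the original
# module). With a single local device no kvstore is needed, so updates are
# performed locally; the tiny parameter dict here is a placeholder.
def _example_create_kvstore():
    params = {'weight': nd.zeros((2, 2))}
    kv, update_on_kvstore = _create_kvstore('local', 1, params)
    return kv, update_on_kvstore   # (None, False) for this configuration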
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
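# --- Illustrative sketch (added for clarity, not part of the original
# module). A save/load round trip with the two functions above; the toy
# network, the parameter shapes and the 'my_model' prefix are placeholders,
# and the files written are 'my_model-symbol.json' and 'my_model-0001.params'.
def _example_checkpoint_roundtrip():
    data = sym.Variable('data')
    net = sym.FullyConnected(data=data, name='fc1', num_hidden=10)
    arg_params = {'fc1_weight': nd.zeros((10, 4)), 'fc1_bias': nd.zeros((10,))}
    save_checkpoint('my_model', 1, net, arg_params, aux_params={})
    return load_checkpoint('my_model', 1)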
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and not '_async' in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage(S3, HDFS)
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
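# --- Illustrative sketch (added for clarity, not part of the original
# module). Typical use of the deprecated FeedForward API as described in the
# docstrings above; the toy network, the all-zero NDArrayIter data and the
# hyper-parameters are placeholders for demonstration only.
def _example_feedforward_usage():
    data = sym.Variable('data')
    net = sym.FullyConnected(data=data, name='fc1', num_hidden=10)
    net = sym.SoftmaxOutput(data=net, name='softmax')
    train_iter = io.NDArrayIter(nd.zeros((100, 20)), nd.zeros((100,)),
                                batch_size=10)
    return FeedForward.create(net, X=train_iter, num_epoch=1, ctx=cpu())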
| apache-2.0 |
thientu/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
nobel1154/edX-Learning-From-Data-Solutions | Final/Python/by_Mark_B2/hw6.py | 3 | 5511 | '''
Created on
@author: Mark
'''
import numpy as np
import string
import scipy.linalg
from scipy.spatial.distance import euclidean
import matplotlib.pyplot as plt
# from cvxopt import matrix
def readIn(name):
return np.fromfile(name, np.float64, sep=' ').reshape((-1, 3))
def dataSet(d):
def f(x):
x1, x2 = x[0], x[1]
return [1, x1, x2, x1*x1, x2*x2, x1*x2, abs(x1-x2), abs(x1+x2)]
return np.apply_along_axis(f, axis=1, arr=d), d[:,-1:]
def regression((X, Y)):
return scipy.linalg.pinv(X).dot(Y)
def weightDecayRegression((X, Y), l=0.):
I = np.identity(len(X[0]))
inv = scipy.linalg.inv(X.T.dot(X) + l*I)
return inv.dot(X.T).dot(Y)
def sign(array):
array[array >= 0] = 1.
array[array < 0] = -1.
return array
def plotData(data, w):
x = readIn('c://users//mark//out.dta')
z = sign(data[0].dot(w))
mis = np.equal(z, -data[1]).T
pos = np.equal(z, data[1]).T
# mis = np.equal(z, -data[1]).T
p = x[mis[0,:]]
plt.plot(p[:,0], p[:,1], 'ob')
p = x[pos[0,:]]
plt.plot(p[:,0], p[:,1], 'og')
# pidx = [i for i, x in enumerate(idx[0])]
# plt.plot(p[:,1], p[:,2], 'or')
plt.show()
# return np.sum(np.equal(sign(z), -data[1])) / float(len(data[0]))
# return np.sum(np.equal(sign(z), -data[1])) / float(len(data[0]))
def classificationError(data, w):
return np.sum(np.equal(sign(data[0].dot(w)), -data[1])) / float(len(data[0]))
def findClosest(options, computed):
def f(a):
return euclidean(a, computed)
distances = np.apply_along_axis(f, axis=1, arr=options)
idx = np.argmin(distances)
return options[idx], distances[idx]
def problem2_6():
name = ['c://users//mark//in.dta', 'c://users//mark//out.dta']
train = dataSet(readIn(name[0]))
test = dataSet(readIn(name[1]))
w = weightDecayRegression(train, 0)
# print w
computed2 = [classificationError(train, w), classificationError(test, w)]
print 'Without:', computed2
for k in range(-5, 4):
w = weightDecayRegression(train, 10 ** k)
print k, classificationError(train, w), classificationError(test, w)#, w
if k == -3:
computed3 = [classificationError(train, w), classificationError(test, w)]
if k == 3:
computed4 = [classificationError(train, w), classificationError(test, w)]
options2 = [[.03, .08], [.03, .1], [.04, .09], [.04, .11], [.05, .1]]
options3 = [[.01, .02], [.02, .04], [.02, .06], [.03, .08], [.03, .1]]
options4 = [[.2, .2], [.2, .3], [.3, .3], [.3, .4], [.4, .4]]
print 'Problem 2', computed2, findClosest(options2, computed2)
print 'Problem 3', computed3, findClosest(options3, computed3)
print 'Problem 4', computed4, findClosest(options4, computed4)
print 'Problem 5', -1, .056
# problem2_6()
def multiplyX(v):
return np.insert(v[0:-1], 0, 0)
def legendre(P, n):
return 1. / (n + 1) * ((2. * n + 1) * multiplyX(P[n]) - n * P[n - 1])
def init(N):
P = np.zeros((N+1, N+1))
P[0,0] = 1.
P[1,1] = 1.
for n in range(1,N):
P[n+1] = legendre(P, n)
return P
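# --- Illustrative check (added for clarity, not part of the original
# solution). Rows of P hold coefficients in increasing powers of x, so the
# recurrence above should reproduce P2(x) = (3x^2 - 1)/2.
def _check_legendre_recurrence():
    P = init(2)
    return np.allclose(P[2][:3], [-0.5, 0.0, 1.5])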
def createSet(P, Q, C, Q_o):
# H = np.empty_like(['*',P[1]])
H = np.zeros((0, 12))
# print H.shape
# print P[1].shape
# np.reshape(H, (0, 11))
for q in range(Q+1):
symbol = 99
if q >= Q_o:
symbol = C
if symbol != 0:
v = np.insert(P[q][:], 0, symbol)
# print v.shape
            H = np.append(H, [v], axis=0)
return H
def problem7():
'''
P(n+1) = 1/(n+1) * ((2*n+1)*x*P(n) - n*P(n-1))
'''
P = init(10)
H = createSet(P, 10, 1, 3)
print 'H(10,1,3)'
print np.sum(H, axis=0)
H = createSet(P, 10, 1, 4)
print 'H(10,1,4)'
print np.sum(H, axis=0)
H = createSet(P, 1, 99, 5)
# print H.shape
print 'H(3,99,5)'
print np.sum(H, axis=0)
# print P
# for n in range(10):
# print P[n].dot(P[n+1])
# problem7()
def problem8():
    ''' Number of operations in backpropagation '''
L = 2
d = [5, 3, 1]
N = 0
''' deltas computation '''
for l in range(L, 0, -1):
for i in range (d[l-1]):
''' delta_i computation '''
for j in range(1, d[l]+1):
''' sum delta_j * w_ij '''
N += 1
print N
print N * 3
# problem8()
def weigthNumber(D):
''' 10 inputs and 1 output '''
n = 0
d_prev = 10
for d in D:
n += d_prev * (d - 1)
d_prev = d
n += d_prev
return n
def problem9_10():
print weigthNumber([36])
line18 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
print sum(line18)
print weigthNumber(line18)
return
print weigthNumber([18, 18])
print weigthNumber([17, 19])
print weigthNumber([19, 17]) # 501
print weigthNumber([20, 16]) # 506
print weigthNumber([21, 15]) # 509
print weigthNumber([22, 14]) # 510
print weigthNumber([23, 13]) # 509
print weigthNumber([22, 12, 2]) # 466
print weigthNumber([21, 13, 2]) # 467
print weigthNumber([20, 14, 2]) # 466
return
print weigthNumber([12, 12, 12])
print
print weigthNumber([11, 13, 12])
print weigthNumber([11, 12, 13])
print weigthNumber([12, 11, 13])
print weigthNumber([13, 11, 12])
print weigthNumber([13, 12, 11])
print weigthNumber([12, 13, 11])
print weigthNumber([11, 14, 11])
print weigthNumber([12, 14, 10])
print weigthNumber([12, 15, 9]) # 407
print weigthNumber([12, 16, 8]) # 410
print weigthNumber([13, 15, 8]) # 415
print weigthNumber([14, 15, 7]) # 423
print weigthNumber([13, 16, 7]) # 418
print weigthNumber([15, 15, 6]) # 431
print weigthNumber([14, 16, 6]) # 426
# problem9_10()
def test():
a = matrix([[1,1], [0,0]])
print a
# test() | apache-2.0 |
nolanliou/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 111 | 7865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
weakties/infrastructure | component_big_twitter/twitter_Parsing_usingPandas.py | 1 | 2436 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 10:35:57 2017
@author: guptaj
"""
import pandas as pd
import sframe
# tweet-preprocessor 0.5.0
import preprocessor as p
import networkx as nx
#sf_full = sframe.SFrame.read_csv('data/01-raw/mini_sample.csv', header=False)
pd_full = pd.read_csv('data/01-raw/mini_sample.csv',encoding = 'utf-8')
#
#sf_full = sframe.SFrame(data = pd_full)
#p = ttp.Parser()
#print sf_full.head()
#print sf_full['X5'][1]
sample_tweet1 = "@bärnettedmond, you now support #IvoWertzel's tweet parser! https://github.com/edburnett/"
sample_tweet2 = "@bärnettedmond, @jysh support #IvoWertzel's tweet parser! https://github.com/edburnett/"
sample_tweet3 = u'RT @aukia: (In Finnish) Ilmoittaudu 28.5 "Kuinka ostaa ketterästi julkishallinnossa" aamiaisseminaariin. @codento,Hansel,MML,HY http://t.co…'
#tweet_encoded = sample_tweet.encode('utf-8')
#tweet = sf_full['X5'][11]
#result = p.parse(tweet_encoded)
#print p.clean('Preprocessor is #awesome 👍 https://github.com/s/preprocessor')
#print p.clean(tweet)
#print result.users
#parsed_tweet = p.parse(tweet)
#
#print parsed_tweet.mentions
#print parsed_tweet.hashtags
#mention = p.parse(sample_tweet2).mentions
#print mention[0].match.strip('@')
#for indvmention in mention:
# print indvmention.match
def get_usermentions(singletweet):
return p.parse(singletweet).mentions
#print get_usermentions(sample_tweet2)
#print sf_full.dtype()
#sf_full_2 = sf_full['X5'].apply(get_usermentions)
#sf_full_trial = sf_full[[14]]
#print len(sf_full)
#print range(len(sf_full))
implicit_network = nx.DiGraph()
for i in range(len(pd_full)):
try:
mentioned = get_usermentions(pd_full['X5'][i])
# mentioned = p.parse(sf_full['X5'][i]).mentions
if mentioned != None:
for indvmention in mentioned:
print pd_full['X4'][i],indvmention.match.strip('@')
if not implicit_network.has_edge(pd_full['X4'][i],indvmention.match.strip('@')):
implicit_network.add_edge(pd_full['X4'][i],indvmention.match.strip('@'), weight = 0)
implicit_network[pd_full['X4'][i]][indvmention.match.strip('@')]['weight'] += 1
except Exception as e:
print i
print "the error is"
print e
nx.readwrite.gexf.write_gexf(implicit_network,'data/02-network/trial_network2.gexf', encoding='utf-8') | mit |
geodynamics/specfem3d | EXTERNAL_PACKAGES_coupled_with_SPECFEM3D/AxiSEM_for_SPECFEM3D/AxiSEM_modif_for_coupling_with_specfem/SOLVER/UTILS/hemispherical_model.py | 3 | 2107 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Define layer boundaries (one more than layers)
layers = [1217.5, 1190., 1160., 1100.]
# Define angles of hemispherical boundaries with a linearly interpolated region in between
angles = [[45., 55.], [50., 60.], [55., 65.]]
vp_in = np.ones((len(layers) - 1, len(angles)))
# define perturbations in each layer and hemisphere
vp_in[0,0] = 0.
vp_in[0,1] = 2.
vp_in[1,0] = 0.
vp_in[1,1] = 5.
vp_in[2,0] = 0.
vp_in[2,1] = 2.
# number of points in theta direction (fine sampling useful in combination with nearest
# neighbour interpolation)
ntheta = 721
dtheta = 180. / (ntheta - 1)
nlayers = len(layers) - 1
# number of radial points per layer; with nearest neighbour interpolation, 2 is fine
nlpl = 2
# distance of points from layer boundaries (e.g. to avoid perturbations on both sides of a
# discontinuity)
dr = .01
f = open('model.sph', 'w')
vp = 0.
vs = 0.
rho = 0.
# total number of points. +1 for the additional zero layer at the bottom
npoints = (nlayers * nlpl + 1) * ntheta
print >> f, npoints
# write model file
for l in np.arange(nlayers):
for r in np.linspace(layers[l] - dr, layers[l+1] + dr, nlpl):
for theta in np.linspace(0., 180., ntheta):
if theta < angles[l][0]:
vp = vp_in[l,0]
elif theta > angles[l][1]:
vp = vp_in[l,1]
else:
# linear interpolation in the central region
vp = vp_in[l,0] \
+ (vp_in[l,1] - vp_in[l,0]) / (angles[l][1] - angles[l][0]) \
* (theta - angles[l][0])
print >> f, '%7.2f %6.2f %5.2f %5.2f %5.2f ' % (r, theta, vp, vs, rho)
# additional zero (relative perturbation!) layer at the bottom to make sure the last layer
# does not extend to the next element boundary. The same approach might be useful for the
# first layer, but in this case it is the ICB anyway
vp = 0.
r = layers[-1] - dr
for theta in np.linspace(0., 180., ntheta):
print >> f, '%7.2f %6.2f %5.2f %5.2f %5.2f ' % (r, theta, vp, vs, rho)
f.close()
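# --- Illustrative read-back sketch (added for clarity, not part of the
# original script). model.sph stores the point count on the first line and
# then "r theta vp vs rho" rows; the file name and the plotted columns below
# simply mirror what is written above.
def _load_model_sph(fname='model.sph'):
    points = np.loadtxt(fname, skiprows=1)
    plt.scatter(points[:, 1], points[:, 0], c=points[:, 2])
    plt.xlabel('theta (deg)')
    plt.ylabel('r (km)')
    return points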
| gpl-3.0 |
lokeshpancharia/data-science-from-scratch | code/recommender_systems.py | 60 | 6291 | from __future__ import division
import math, random
from collections import defaultdict, Counter
from linear_algebra import dot
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests).most_common()
def most_popular_new_interests(user_interests, max_results=5):
suggestions = [(interest, frequency)
for interest, frequency in popular_interests
if interest not in user_interests]
return suggestions[:max_results]
#
# user-based filtering
#
def cosine_similarity(v, w):
return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
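# --- Illustrative sanity check (added for clarity, not part of the original
# chapter code). Orthogonal interest vectors score 0 and vectors pointing the
# same way score 1.
def _check_cosine_similarity():
    assert abs(cosine_similarity([1, 0], [0, 1]) - 0.0) < 1e-9
    assert abs(cosine_similarity([1, 1], [2, 2]) - 1.0) < 1e-9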
unique_interests = sorted(list({ interest
for user_interests in users_interests
for interest in user_interests }))
def make_user_interest_vector(user_interests):
"""given a list of interests, produce a vector whose i-th element is 1
if unique_interests[i] is in the list, 0 otherwise"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
user_interest_matrix = map(make_user_interest_vector, users_interests)
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_matrix]
for interest_vector_i in user_interest_matrix]
def most_similar_users_to(user_id):
pairs = [(other_user_id, similarity) # find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity
return sorted(pairs, # sort them
key=lambda (_, similarity): similarity, # most similar
reverse=True) # first
def user_based_suggestions(user_id, include_current_interests=False):
# sum up the similarities
suggestions = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# convert them to a sorted list
suggestions = sorted(suggestions.items(),
key=lambda (_, weight): weight,
reverse=True)
# and (maybe) exclude already-interests
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
#
# Item-Based Collaborative Filtering
#
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_matrix]
for j, _ in enumerate(unique_interests)]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda (_, similarity): similarity,
reverse=True)
def item_based_suggestions(user_id, include_current_interests=False):
suggestions = defaultdict(float)
user_interest_vector = user_interest_matrix[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
suggestions = sorted(suggestions.items(),
key=lambda (_, similarity): similarity,
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
if __name__ == "__main__":
print "Popular Interests"
print popular_interests
print
print "Most Popular New Interests"
print "already like:", ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"]
print most_popular_new_interests(["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"])
print
print "already like:", ["R", "Python", "statistics", "regression", "probability"]
print most_popular_new_interests(["R", "Python", "statistics", "regression", "probability"])
print
print "User based similarity"
print "most similar to 0"
print most_similar_users_to(0)
print "Suggestions for 0"
print user_based_suggestions(0)
print
print "Item based similarity"
print "most similar to 'Big Data'"
print most_similar_interests_to(0)
print
print "suggestions for user 0"
print item_based_suggestions(0)
| unlicense |
danblick/robocar | scripts/camera_cal.py | 1 | 2599 | import argparse
import os
import glob
import json
import cv2
import numpy as np
import matplotlib
matplotlib.use("AGG")
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def base(filepath):
filename = os.path.basename(filepath)
x, _ = os.path.splitext(filename)
return x
def run(args):
images = glob.glob(os.path.join(args.dirpath, "calibration*.jpg"))
objp = np.zeros((args.nx * args.ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:args.nx, 0:args.ny].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
img_size = (640, 480)
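    # detect chessboard corners in each calibration image and collect 3D-2D point correspondences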
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (args.nx, args.ny), None)
        if ret:
objpoints.append(objp)
imgpoints.append(corners)
cv2.drawChessboardCorners(img, (args.nx, args.ny), corners, ret)
write_name = os.path.join(args.outpath, "corners_found.%s.png" % base(fname))
cv2.imwrite(write_name, img)
else:
os.unlink(fname)
cv2.destroyAllWindows()
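    # recover the camera matrix and distortion coefficients from the collected point pairs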
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
img_size, None, None)
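    # second pass: write side-by-side original vs. undistorted figures for visual inspection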
images = glob.glob(os.path.join(args.dirpath, "calibration*.jpg"))
for idx, fname in enumerate(images):
img = cv2.imread(fname)
dst = cv2.undistort(img, mtx, dist, None, mtx)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
fig.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)
fig.savefig(os.path.join(args.outpath, "undistort.%s.png" % base(fname)))
plt.close(fig)
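    # save the calibration (camera matrix and distortion coefficients) as JSON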
dist_pickle = {}
dist_pickle["mtx"] = mtx.tolist()
dist_pickle["dist"] = dist.tolist()
with open(os.path.join(args.outpath, "calibration.json"), "wb") as outfile:
json.dump(dist_pickle, outfile, indent=2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("dirpath")
parser.add_argument("outpath")
parser.add_argument("--nx", type=int, default=10)
parser.add_argument("--ny", type=int, default=7)
args = parser.parse_args()
if not os.path.isdir(args.outpath):
os.makedirs(args.outpath)
run(args)
if __name__ == "__main__":
main()
| mit |
mph-/lcapy | lcapy/sexpr.py | 1 | 14849 | """This module provides the LaplaceDomainExpression class to represent
s-domain (Laplace domain) expressions.
Copyright 2014--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import LaplaceDomain
from .inverse_laplace import inverse_laplace_transform
from .sym import ssym, tsym, j, pi, sympify
from .ratfun import _zp2tf, _pr2tf, Ratfun
from .expr import Expr, symbol, expr, ExprDict, exprcontainer, expr_make
from .units import u as uu
from .functions import sqrt
import numpy as np
from sympy import limit, exp, Poly, Integral, div, oo, Eq, Expr as symExpr
__all__ = ('sexpr', 'zp2tf', 'tf', 'pr2tf')
class LaplaceDomainExpression(LaplaceDomain, Expr):
"""s-domain expression or symbol."""
var = ssym
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
super(LaplaceDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(tsym) and not expr.has(Integral):
raise ValueError(
's-domain expression %s cannot depend on t' % expr)
def as_expr(self):
return LaplaceDomainExpression(self)
@classmethod
def from_poles_residues(cls, poles, residues):
"""Create a transfer function from lists of poles and residues.
See also from_zeros_poles_gain, from_numer_denom"""
return cls(pr2tf(poles, residues, cls.var), causal=True)
@classmethod
def from_zeros_poles_gain(cls, zeros, poles, K=1):
"""Create a transfer function from lists of zeros and poles,
and from a constant gain.
See also from_poles_residues, from_numer_denom"""
return cls(zp2tf(zeros, poles, K, cls.var), causal=True)
@classmethod
def from_numer_denom(cls, numer, denom):
"""Create a transfer function from lists of the coefficient
for the numerator and denominator.
See also from_zeros_poles_gain, from_poles_residues"""
return cls(tf(numer, denom, cls.var), causal=True)
def tdifferentiate(self):
"""Differentiate in t-domain (multiply by s)."""
return self.__class__(self.expr * self.var, **self.assumptions)
def tintegrate(self):
"""Integrate in t-domain (divide by s)."""
return self.__class__(self.expr / self.var, **self.assumptions)
def delay(self, T):
"""Apply delay of T seconds by multiplying by exp(-s T)."""
T = self.__class__(T)
return self.__class__(self.expr * exp(-s * T))
@property
def jomega(self):
"""Return expression with s = j omega."""
from .symbols import jomega
return self.subs(self.var, jomega)
def post_initial_value(self):
"""Determine post-initial value at t = 0+."""
return self.__class__(limit(self.expr * self.var, self.var, oo))
def final_value(self):
"""Determine value at t = oo."""
return self.__class__(limit(self.expr * self.var, self.var, 0))
def inverse_laplace(self, **assumptions):
"""Attempt inverse Laplace transform.
If causal=True the response is zero for t < 0 and
the result is multiplied by Heaviside(t)
If ac=True or dc=True the result is extrapolated for t < 0.
Otherwise the result is only known for t >= 0.
"""
assumptions = self.assumptions.merge(**assumptions)
result = inverse_laplace_transform(self.expr, self.var, tsym,
**assumptions)
return self.change(result, domain='time', units_scale=uu.Hz, **assumptions)
def ILT(self, **assumptions):
"""Attempt inverse Laplace transform.
If causal=True the response is zero for t < 0 and
the result is multiplied by Heaviside(t)
If ac=True or dc=True the result is extrapolated for t < 0.
Otherwise the result is only known for t >= 0.
"""
return self.inverse_laplace(**assumptions)
def time(self, **assumptions):
"""Convert to time domain.
If causal=True the response is zero for t < 0 and
the result is multiplied by Heaviside(t)
If ac=True or dc=True the result is extrapolated for t < 0.
Otherwise the result is only known for t >= 0.
"""
try:
return self.inverse_laplace(**assumptions)
except ValueError:
return self.as_sum().inverse_laplace(**assumptions)
def laplace(self, **assumptions):
"""Convert to s-domain."""
assumptions = self.assumptions.merge(**assumptions)
return self.__class__(self, **assumptions)
def fourier(self, **assumptions):
"""Convert to Fourier domain."""
from .symbols import f, jw, pi
if self.is_causal or assumptions.get('causal', False):
# Note, this does not apply for 1 / s.
tmp = self(jw)
if tmp.real != 0:
return self.change(tmp(2 * pi * f), domain='fourier',
**assumptions)
result = self.time(**assumptions).fourier(**assumptions)
return result
def angular_fourier(self, **assumptions):
"""Convert to angular Fourier domain."""
from .symbols import jw
if self.is_causal:
# Note, this does not apply for 1 / s.
tmp = self(jw)
if tmp.real != 0:
return self.change(tmp, domain='angular fourier',
**assumptions)
result = self.time(**assumptions).angular_fourier(**assumptions)
return result
def norm_angular_fourier(self, **assumptions):
"""Convert to normalized angular Fourier domain."""
from .symbols import jw, Omega
from .dsym import dt
if self.is_causal:
# Note, this does not apply for 1 / s.
tmp = self(j * Omega / dt)
if tmp.real != 0:
return self.change(tmp, domain='norm angular fourier',
**assumptions)
result = self.time(**assumptions).norm_angular_fourier(**assumptions)
return result
def norm_fourier(self, **assumptions):
"""Convert to normalized Fourier domain."""
from .symbols import jw, F
from .dsym import dt
if self.is_causal:
# Note, this does not apply for 1 / s.
tmp = self(j * F / dt)
if tmp.real != 0:
return self.change(tmp, domain='norm fourier',
**assumptions)
result = self.time(**assumptions).norm_fourier(**assumptions)
return result
def phasor(self, **assumptions):
"""Convert to phasor domain."""
result = PhasorFrequencyDomainExpression.from_laplace(self, **assumptions)
return result
def transient_response(self, tvector=None):
"""Evaluate transient (impulse) response."""
if tvector is None:
return self.time()
return self.time().evaluate(tvector)
def impulse_response(self, tvector=None):
"""Evaluate transient (impulse) response."""
return self.transient_response(tvector)
def step_response(self, tvector=None):
"""Evaluate step response."""
H = self.__class__(self / self.var, **self.assumptions)
return H.transient_response(tvector)
def angular_frequency_response(self, wvector=None):
"""Convert to angular frequency domain and evaluate response if
angular frequency vector specified.
"""
from .symbols import omega
X = self.subs(j * omega)
if wvector is None:
return X
return X.evaluate(wvector)
def frequency_response(self, fvector=None):
"""Convert to frequency domain and evaluate response if frequency
vector specified.
"""
from .symbols import f
X = self.subs(j * 2 * pi * f)
if fvector is None:
return X
return X.evaluate(fvector)
def response(self, x, t):
"""Evaluate response to input signal x at times t."""
if len(x) != len(t):
raise ValueError('x must have same length as t')
dt = t[1] - t[0]
if not np.allclose(np.diff(t), np.ones(len(t) - 1) * dt):
            raise ValueError('t values not equally spaced')
# Perform polynomial long division so expr = Q + M / D
N, D, delay = self._decompose()
Q, M = div(N, D)
expr = M / D
N = len(t)
# Evaluate transient response.
th = np.arange(N) * dt - dt
h = LaplaceDomainExpression(expr).transient_response(th)
print('Convolving...')
ty = t
y = np.convolve(x, h)[0:N] * dt
if Q:
# Handle Dirac deltas and their derivatives.
C = Q.all_coeffs()
for n, c in enumerate(C):
y += c * x
x = np.diff(x) / dt
x = np.hstack((x, 0))
from scipy.interpolate import interp1d
if delay != 0.0:
print('Interpolating...')
# Try linear interpolation; should oversample first...
y = interp1d(ty, y, bounds_error=False, fill_value=0)
y = y(t - delay)
return y
def _decompose(self):
N, D, delay = self._ratfun.as_ratfun_delay()
return N, D, delay
def differential_equation(self, input='x', output='y'):
"""Create differential equation from transfer function.
For example,
>>> H = (s + 3) / (s**2 + 4)
>>> H.differential_equation()
        3.y(t) + d/dt y(t) = 4.x(t) + d^2/dt^2 x(t)
"""
H = self
x = texpr('%s(t)' % input)
y = texpr('%s(t)' % output)
X = x.LT()
Y = y.LT()
N = self.N
D = self.D
lhs = (N * Y).ILT(causal=True)
rhs = (D * X).ILT(causal=True)
return TimeDomainExpression(Eq(lhs.expr, rhs.expr))
def evaluate(self, svector=None):
return super(LaplaceDomainExpression, self).evaluate(svector)
def plot(self, **kwargs):
"""Plot pole-zero map.
kwargs include:
axes - the plot axes to use otherwise a new figure is created
xlabel - the x-axis label (default Re(s))
ylabel - the y-axis label (default Im(s))
xscale - the x-axis scaling
yscale - the y-axis scaling
in addition to those supported by the matplotlib plot command.
The plot axes are returned."""
from .plot import plot_pole_zero
return plot_pole_zero(self, **kwargs)
def pole_zero_plot(self, **kwargs):
"""Plot pole-zero map."""
return self.plot(**kwargs)
def bode_plot(self, fvector=None, **kwargs):
"""Plot frequency response for a frequency-domain phasor as a Bode
plot (but without the straight line approximations). fvector
specifies the frequencies. If it is a tuple (f1, f2), it sets
the frequency limits. Since a logarithmic frequency scale is used,
f1 must be greater than 0.
This method makes the assumption that the expression is causal.
"""
return self.fourier(causal=True).bode_plot(fvector, **kwargs)
def nyquist_plot(self, fvector=None, **kwargs):
"""Plot frequency response for a frequency-domain phasor as a Nyquist
plot. fvector specifies the frequencies. If it is a tuple
(f1, f2), it sets the frequency limits.
        `npoints` sets the number of plotted points.
The unit circle is shown by default. This can be disabled with `unitcircle=False`.
This method makes the assumption that the expression is causal.
"""
return self.fourier(causal=True).nyquist_plot(fvector, **kwargs)
def bilinear_transform(self):
"""Approximate s = ln(z)
by s = (2 / dt) * (1 - z**-1) / (1 + z**-1)
This is also called Tustin's method and is equivalent to the
trapezoidal method."""
# TODO: add frequency warping as an option
from .discretetime import z, dt
return self.subs((2 / dt) * (1 - z**-1) / (1 + z**-1))
def forward_euler_transform(self):
"""Approximate s = ln(z)
by s = (1 / dt) * (1 - z**-1) / z**-1"""
from .discretetime import z, dt
return self.subs((1 / dt) * (1 - z**-1) / (z**-1))
def backward_euler_transform(self):
"""Approximate s = ln(z)
by s = (1 / dt) * (1 - z**-1)"""
from .discretetime import z, dt
return self.subs((1 / dt) * (1 - z**-1))
def tf(numer, denom=1, var=None):
"""Create a transfer function from lists of the coefficient
for the numerator and denominator."""
if var is None:
var = ssym
N = Poly(sympify(numer), var)
D = Poly(sympify(denom), var)
return LaplaceDomainTransferFunction(N / D, causal=True)
def zp2tf(zeros, poles, K=1, var=None):
"""Create a transfer function from lists (or dictionaries) of zeros and poles,
and from a constant gain."""
if var is None:
var = ssym
return LaplaceDomainTransferFunction(_zp2tf(sympify(zeros), sympify(poles),
sympify(K), var), causal=True)
def pr2tf(poles, residues, var=None):
"""Create a transfer function from lists of poles and residues."""
if var is None:
var = ssym
return LaplaceDomainTransferFunction(_pr2tf(sympify(poles), sympify(residues), var),
causal=True)
def sexpr(arg, **assumptions):
"""Create LaplaceDomainExpression object. If `arg` is ssym return s"""
if arg is ssym:
return s
return expr_make('laplace', arg, **assumptions)
from .expressionclasses import expressionclasses
classes = expressionclasses.register('laplace', LaplaceDomainExpression)
LaplaceDomainVoltage = classes['voltage']
LaplaceDomainCurrent = classes['current']
LaplaceDomainAdmittance = classes['admittance']
LaplaceDomainImpedance = classes['impedance']
LaplaceDomainTransferFunction = classes['transfer']
from .texpr import TimeDomainExpression, texpr
from .phasor import PhasorFrequencyDomainExpression
s = LaplaceDomainExpression('s')
s.units = uu.rad / uu.s
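# Example usage (a minimal sketch, mirroring the differential_equation docstring above):
#   H = (s + 3) / (s**2 + 4)    # s-domain transfer function
#   h = H.ILT(causal=True)      # corresponding causal time-domain response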
| lgpl-2.1 |
inflector/opencog | opencog/python/utility/functions.py | 34 | 11056 | from math import fabs, isnan
from datetime import datetime
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.numeric.globals import EPSILON
from numpy import NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
from scipy.integrate import quad
__author__ = 'keyvan'
def integral(function, start, end):
if hasattr(function, 'integral'):
return function.integral(start, end)
area, error = quad(function, start, end)
return area
def almost_equals(a, b, epsilon=EPSILON):
if fabs(a - b) < epsilon:
return True
return False
def invoke_method_on(method, sequence_or_point):
if method is None:
return None
if not callable(method):
raise TypeError("'method' is not callable")
result = []
try:
for point in sequence_or_point:
if type(point) is datetime:
point = UnixTime(point)
result.append(method(point))
except TypeError:
if type(sequence_or_point) is datetime:
sequence_or_point = UnixTime(sequence_or_point)
return method(sequence_or_point)
return result
def index_of_first_local_maximum(sequence):
first_time = True
index = 0
for element in sequence:
if first_time:
previous = element
first_time = False
continue
if element <= previous:
return index
previous = element
index += 1
return None
class Function(object):
_domain = None
_range = None
_function_undefined = None
def __init__(self, function_undefined=None, domain=None):
if function_undefined is not None:
self.function_undefined = function_undefined
if domain is not None:
if not hasattr(domain, '__iter__') or not hasattr(domain, '__getitem__'):
raise TypeError("'domain' should be iterable and support indexing")
self._domain = domain
def call_on_single_point(self, x):
"""
        To be overridden; __call__ invokes this to handle both points and sequences.
"""
return 0
def derivative(self, point):
return None
def _check_domain_for(self, feature_name):
if self.domain is None:
raise TypeError("'{0}' object does not support {1}, 'domain' should be specified".format(
self.__class__.__name__, feature_name))
def plot(self, plt=None):
self._check_domain_for('plotting')
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.domain, self.range)
return plt
@property
def function_undefined(self):
return self._function_undefined
@function_undefined.setter
def function_undefined(self, value):
if value is not None and not isinstance(value, Function):
raise TypeError("'function_undefined' should be of type 'Function'")
self._function_undefined = value
@property
def domain(self):
return self._domain
@property
def range(self):
return self()
def __call__(self, x=None):
if x is None:
self._check_domain_for("call with 'None'")
x = self.domain
return invoke_method_on(self.call_on_single_point, x)
def __getitem__(self, index):
self._check_domain_for('indexing')
return self.range[index]
def __len__(self):
self._check_domain_for('len()')
return len(self.range)
def __iter__(self):
self._check_domain_for('iter()')
return iter(self.range)
def __reversed__(self):
self._check_domain_for('reversed()')
return reversed(self.range)
class FunctionLinear(Function):
def __init__(self, a=None, b=None, x_0=None, y_0=None, x_1=None, y_1=None):
#(x_0, y_0), (x_1, y_1) = sorted([(x_0, y_0), (x_1, y_1)])
if (a, b) == (None, None):
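            # slope and intercept computed from the two points (x_0, y_0) and (x_1, y_1)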
a = (float(y_1) - y_0) / (x_1 - x_0)
b = y_0 - a * x_0
if isnan(a) or isnan(b):
pass
self.a = a
self.b = b
def call_on_single_point(self, x):
return float(self.a * x + self.b)
def intersect(self, other):
if almost_equals(self.a, other.a):
return None
x = (float(other.b) - self.b) / (self.a - other.a)
return x, self(x)
def integral(self, start, end):
if start >= end:
return 0
if self.a == 0:
return self.b * (end - start)
x_intercept = self.x_intercept
if start > x_intercept or end < x_intercept or almost_equals(end, x_intercept) or almost_equals(start, x_intercept):
return (self(start) + self(end)) * (end - start) / 2.0
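        # otherwise the line crosses zero at x_intercept: add the areas of the two triangles (base * height / 2)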
        minus_triangle = (x_intercept - start) * self(start) / 2.0
        plus_triangle = (end - x_intercept) * self(end) / 2.0
return minus_triangle + plus_triangle
def derivative(self, point):
return self.a
@property
def x_intercept(self):
return - float(self.b) / self.a
@property
def y_intercept(self):
return self(0)
class FunctionHorizontalLinear(FunctionLinear):
def __init__(self, y_intercept):
FunctionLinear.__init__(self, a=0, b=y_intercept)
def call_on_single_point(self, x):
return self.b
def integral(self, start, end):
if start >= end:
return 0
if almost_equals(self.b, 0):
return 0
return float(self.b) * (end - start)
def derivative(self, point):
return 0
FUNCTION_ZERO = FunctionHorizontalLinear(0)
FUNCTION_ONE = FunctionHorizontalLinear(1)
class FunctionComposite(Function):
is_normalised = False
def __init__(self, dictionary_bounds_function, function_undefined=None, domain=None, is_normalised=False):
if is_normalised is not False:
self.is_normalised = True
Function.__init__(self, function_undefined=function_undefined, domain=domain)
if not isinstance(dictionary_bounds_function, dict):
raise TypeError("'dictionary_bounds_function' should be a dictionary with (lower_bound, higher_bound) "
"tuple keys and values of type 'Function'")
self._dictionary_bounds_function = dictionary_bounds_function
def call_on_single_point(self, x):
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= x:
if b >= x:
if self.dictionary_bounds_function[function_bounds] is None:
return None
return self.dictionary_bounds_function[function_bounds](x)
return self.function_undefined(x)
def integral(self, start, end):
if self.is_normalised and self.domain is not None:
if (start < self.domain[0] or almost_equals(start, self.domain[0])) and (
end > self.domain[-1] or almost_equals(end, self.domain[-1])):
return 1.0
if start >= end:
return 0
result = 0
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= start:
if b >= end:
return self.dictionary_bounds_function[function_bounds].integral(start, end)
not_ordered = {
(start, 0): 's', (end, 0): 'e',
(a, 1): 'a', (b, 1): 'b'
}
order = ''.join([not_ordered[i] for i in sorted(not_ordered)])
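            # 'order' records how start (s), end (e) and the piece bounds a, b interleave on the real line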
if (a == start or a == end) and order == 'saeb' or (b == start or b == end) and order == 'asbe':
continue
if order in 'seab abse':
continue
if order == 'saeb':
b = end
elif order == 'asbe':
a = start
result += self.dictionary_bounds_function[function_bounds].integral(a, b)
return result
def find_bounds_for(self, point):
for bounds in self.dictionary_bounds_function:
(a, b) = bounds
if a <= point and b >= point:
return bounds
def derivative(self, point):
return self.dictionary_bounds_function[self.find_bounds_for(point)].derivative(point)
def function_in_point(self, point):
for bounds in self.dictionary_bounds_function:
a, b = bounds
if a <= point <= b:
return self.dictionary_bounds_function[bounds]
return None
# def functions_in_interval(self, interval_start, interval_end):
# dictionary_bounds_function = {}
# for bounds in self.dictionary_bounds_function:
# a, b = bounds
# if (interval_start < a or almost_equals(interval_start, a)) and (
#
# ):
@property
def dictionary_bounds_function(self):
return self._dictionary_bounds_function
class FunctionPiecewiseLinear(FunctionComposite):
def __init__(self, dictionary_input_output, function_undefined=None, is_normalised=False):
self.input_list, self.output_list = convert_dict_to_sorted_lists(dictionary_input_output)
dictionary_bounds_function = {}
for i in xrange(1, len(self.input_list)):
x_0, x_1 = self.input_list[i - 1], self.input_list[i]
y_0, y_1 = self.output_list[i - 1], self.output_list[i]
dictionary_bounds_function[(x_0, x_1)] = FunctionLinear(x_0=x_0, x_1=x_1, y_0=y_0, y_1=y_1)
if NEGATIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(NEGATIVE_INFINITY, self.input_list[0])] = function_undefined
if POSITIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(self.input_list[-1], POSITIVE_INFINITY)] = function_undefined
FunctionComposite.__init__(self, dictionary_bounds_function,
function_undefined=function_undefined,
domain=self.input_list,
is_normalised=is_normalised)
def normalised(self):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
if almost_equals(area, 0):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
dictionary_input_output = {}
output_list = [y / area for y in self.output_list]
for i in xrange(len(self.input_list)):
dictionary_input_output[self.input_list[i]] = output_list[i]
result = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=self.function_undefined)
result.is_normalised = True
return result
def __and__(self, other):
for bounds in self.dictionary_bounds_function:
a, b = bounds
linear_function = self.dictionary_bounds_function[bounds]
if __name__ == '__main__':
a = FunctionLinear(1, 0)
b = FunctionLinear(-1, 1)
print a.intersect(b)
| agpl-3.0 |
jmmease/pandas | pandas/tests/io/parser/quoting.py | 18 | 5813 | # -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting='foo')
# quoting must in the range [0, 3]
msg = 'bad "quoting" value'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
tm.assert_raises_regex(TypeError, msg, self.read_csv,
StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONNUMERIC,
names=cols)
tm.assert_frame_equal(result, expected)
def test_double_quote(self):
data = 'a,b\n3,"4 "" 5"'
expected = DataFrame([[3, '4 " 5']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[3, '4 " 5"']],
columns=['a', 'b'])
result = self.read_csv(StringIO(data), quotechar='"',
doublequote=False)
tm.assert_frame_equal(result, expected)
def test_quotechar_unicode(self):
# See gh-14477
data = 'a\n1'
expected = DataFrame({'a': [1]})
result = self.read_csv(StringIO(data), quotechar=u('"'))
tm.assert_frame_equal(result, expected)
# Compared to Python 3.x, Python 2.x does not handle unicode well.
if PY3:
result = self.read_csv(StringIO(data), quotechar=u('\u0001'))
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
johannfaouzi/pyts | pyts/transformation/tests/test_boss.py | 1 | 6243 | """Testing for Bag-of-SFA Symbols."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
import pytest
import re
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import CountVectorizer
from pyts.transformation import BOSS
from pyts.approximation import SymbolicFourierApproximation
n_samples, n_timestamps, n_classes = 8, 200, 2
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_timestamps)
y = rng.randint(n_classes, size=n_samples)
@pytest.mark.parametrize(
'params, error, err_msg',
[({'word_size': "3"}, TypeError, "'word_size' must be an integer."),
({'window_size': {}}, TypeError,
"'window_size' must be an integer or a float."),
({'window_step': {}}, TypeError,
"'window_step' must be an integer or a float."),
({'word_size': 0}, ValueError, "'word_size' must be a positive integer."),
({'window_size': 0, 'drop_sum': True}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to (n_timestamps - 1) if 'drop_sum=True'."),
({'window_size': n_timestamps, 'drop_sum': True}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to (n_timestamps - 1) if 'drop_sum=True'."),
({'window_size': 0}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps if 'drop_sum=False'."),
({'window_size': n_timestamps + 1}, ValueError,
"If 'window_size' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps if 'drop_sum=False'."),
({'window_size': 1.5}, ValueError,
"If 'window_size' is a float, it must be greater than 0 and lower than "
"or equal to 1."),
({'window_step': 0}, ValueError,
"If 'window_step' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps."),
({'window_step': n_timestamps + 1}, ValueError,
"If 'window_step' is an integer, it must be greater than or equal to 1 "
"and lower than or equal to n_timestamps."),
({'window_step': 0.}, ValueError,
"If 'window_step' is a float, it must be greater than 0 and lower than "
"or equal to 1."),
({'window_step': 1.2}, ValueError,
"If 'window_step' is a float, it must be greater than 0 and lower than "
"or equal to 1."),
({'window_size': 4, 'drop_sum': True}, ValueError,
"'word_size' must be lower than or equal to (window_size - 1) if "
"'drop_sum=True'."),
({'window_size': 3}, ValueError,
"'word_size' must be lower than or equal to window_size if "
"'drop_sum=False'.")]
)
def test_parameter_check(params, error, err_msg):
"""Test parameter validation."""
boss = BOSS(**params)
with pytest.raises(error, match=re.escape(err_msg)):
boss.fit(X, y)
@pytest.mark.parametrize(
'sparse, instance', [(True, csr_matrix), (False, np.ndarray)])
def test_sparse_dense(sparse, instance):
"""Test that the expected type is returned."""
weasel = BOSS(sparse=sparse)
assert isinstance(weasel.fit(X, y).transform(X), instance)
assert isinstance(weasel.fit_transform(X, y), instance)
def test_accurate_results_without_numerosity_reduction():
"""Test that the actual results are the expected ones."""
boss = BOSS(
word_size=4, n_bins=3, window_size=100, window_step=100,
anova=False, drop_sum=False, norm_mean=False, norm_std=False,
strategy='quantile', alphabet=None, numerosity_reduction=False
)
X_windowed = X.reshape(8, 2, 100).reshape(16, 100)
sfa = SymbolicFourierApproximation(
n_coefs=4, drop_sum=False, anova=False, norm_mean=False,
norm_std=False, n_bins=3, strategy='quantile', alphabet=None
)
y_repeated = np.repeat(y, 2)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i]) for i in range(16)])
X_word = X_word.reshape(8, 2)
X_bow = np.asarray([' '.join(X_word[i]) for i in range(8)])
vectorizer = CountVectorizer()
arr_desired = vectorizer.fit_transform(X_bow).toarray()
vocabulary_desired = {value: key for key, value in
vectorizer.vocabulary_.items()}
arr_actual = boss.fit_transform(X, y).toarray()
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
arr_actual = boss.fit(X, y).transform(X).toarray()
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
def test_accurate_results_floats():
"""Test that the actual results are the expected ones."""
boss = BOSS(
word_size=4, n_bins=3, window_size=0.5, window_step=0.5,
anova=False, drop_sum=False, norm_mean=False, norm_std=False,
strategy='quantile', alphabet=None, numerosity_reduction=True
)
X_windowed = X.reshape(8, 2, 100).reshape(16, 100)
sfa = SymbolicFourierApproximation(
n_coefs=4, drop_sum=False, anova=False, norm_mean=False,
norm_std=False, n_bins=3, strategy='quantile', alphabet=None
)
y_repeated = np.repeat(y, 2)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i]) for i in range(16)])
X_word = X_word.reshape(8, 2)
not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1], np.full(8, True)]
X_bow = np.asarray([' '.join(X_word[i, not_equal[i]]) for i in range(8)])
vectorizer = CountVectorizer()
arr_desired = vectorizer.fit_transform(X_bow).toarray()
vocabulary_desired = {value: key for key, value in
vectorizer.vocabulary_.items()}
arr_actual_1 = boss.fit_transform(X, None).toarray()
np.testing.assert_allclose(arr_actual_1, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
arr_actual_2 = boss.fit(X, None).transform(X).toarray()
np.testing.assert_allclose(arr_actual_2, arr_desired, atol=1e-5, rtol=0)
assert boss.vocabulary_ == vocabulary_desired
| bsd-3-clause |
coin-or/oBB | obb/T1.py | 1 | 13298 | from __future__ import division
# Overlapping Branch and Bound, data parallel algorithm
def runpar(f, g, H, Lg, Lh, l, u, bound, circle, A=None, b=None, E=None, d=None, Tol=1e-2, Heur=0, TolType='r', Vis=0, SD=1, Rtol=1e-15, TimeQP=0, TimeWidle=0, qpsolver='cvxopt'):
# Optional Inputs
# Tolerance
# Heuristic lattice (0 - off, 1 - on)
# Tolerance type (r - relative, a - absolute)
# Visualisation (0 - off, 1 - on)
# Step Debugging (0 - off, 1 - on)
# Radius Tolerance
# Time Worker Idling (0 - off, 1 - on)
# Time QP solves (0 - off, 1 - on)
# QP Solver (quadprog, cvxopt, nag)
# MPI
from mpi4py import MPI
# MPI comm
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Get D
D = len(l)
# Master Process
if(rank == 0):
# Number of processors
numprocs = comm.Get_size()
# Import necessary functions
from numpy import array, pi, sqrt, dot, identity, hstack, vstack, empty, inf
from numpy.linalg import norm
from time import time
from itertools import product
from sys import float_info
# Initialise empty arrays
if(A is None):
A = empty((0,D))
b = empty(0)
if(E is None):
E = empty((0,D))
d = empty(0)
# QuadProg++ solver (fast!)
if(qpsolver == 'quadprog'):
# Import QuadProg++ solver
from PyQuadProg import PyQuadProg
# Check if circle has feasible point
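        # (the QP minimises ||x - c.xc||^2 over the feasible set; c is feasible iff the minimiser lies within radius r)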
def mfeasible(c):
# Solve QP to check feasibility
sol = PyQuadProg(2*identity(D),-2*c.xc,E.transpose(),-1*d,vstack([identity(D),-1*identity(D),-1*A]).transpose(),hstack([-l,u,b]))
mxopt = sol.x.getArray()
mr = dot(mxopt,mxopt) + dot(mxopt,-2*c.xc) + dot(c.xc,c.xc)
# Check if point lies inside domain
if(mr < (c.r**2)):
mf = 1
else:
mf = 0
mxopt = None
return mf, mxopt
# CVXOPT QP solver (slow)
elif(qpsolver == 'cvxopt'):
# Import cvxopt solver
from cvxopt import matrix
from cvxopt.solvers import qp, options
# Set tolerance options
options['show_progress'] = False
options['abstol'] = 1e-9
options['reltol'] = 1e-8
options['feastol'] = 1e-9
# Check if circle has feasible point
def mfeasible(c):
# Solve QP to check feasibility
sol = qp(matrix(2*identity(D)),matrix(-2*c.xc),matrix(vstack([-1*identity(D),identity(D),A])),matrix(hstack([-l,u,b])),matrix(E),matrix(d))
mxopt = array(sol['x']).flatten()
mr = sol['primal objective']
mr = mr + dot(c.xc,c.xc)
# Check if point lies inside domain
if(mr < (c.r**2)):
mf = 1
else:
mf = 0
mxopt = None
return mf, mxopt
# NAG QP solver (fast!)
elif(qpsolver == 'nag'):
# Import QP Solver
from qpsolver_lincon import qpsolver_lincon
# Check if circle has feasible point
def mfeasible(c):
# Solve QP to check feasibility
mxopt = c.xc.copy()
mr = qpsolver_lincon(2*identity(D),-2*c.xc,hstack([l,-inf,d]),hstack([u,b,d]),mxopt,vstack([A,E]),D,A.shape[0]+E.shape[0])
mr = mr + dot(c.xc,c.xc)
# Check if point lies inside domain
if(mr < (c.r**2)):
mf = 1
else:
mf = 0
mxopt = None
return mf, mxopt
# Visualisation
if(Vis == 1):
from matplotlib.pyplot import figure, Rectangle, Circle, gca, show, title, axis, draw
# Draw bound constraints [l,u]
fig = figure('Processor '+str(rank))
gca().add_patch(Rectangle((l[0],l[1]), u[0]-l[0], u[1]-l[1], fill=False))
axis([l[0]-1,u[0]+1,l[1]-1,u[1]+1])
title('Master Processor')
show(block=False)
# Circle drawing procedure
def drawc(c,col):
# Draw circle
gca().add_patch(Circle((c.xc[0],c.xc[1]), radius=c.r , color=col, fill=False))
axis('equal')
draw()
if(Heur == 0):
ksp = 3**D
else:
# Load relevant normalised lattice
from numpy import loadtxt
from pkg_resources import resource_stream
if(D == 2):
lat = array([[0., 0.], [1., 0.], [-1., 0.],
[0.5, sqrt(3.)/2.], [-0.5, sqrt(3.)/2.],
[0.5, -sqrt(3.)/2.], [-0.5, -sqrt(3.)/2.]])
elif(D == 3):
lat = loadtxt(resource_stream('obb','lattices/d3'))
elif(D == 4):
lat = loadtxt(resource_stream('obb','lattices/d4'))
elif(D == 5):
lat = loadtxt(resource_stream('obb','lattices/d5'))
elif(D == 6):
lat = loadtxt(resource_stream('obb','lattices/e6'))
elif(D == 7):
lat = loadtxt(resource_stream('obb','lattices/e7'))
elif(D == 8):
lat = loadtxt(resource_stream('obb','lattices/e8'))
else:
raise RuntimeError('A lattice for '+str(D)+' Dimensions has yet to be provided.')
# Get kissing number + 1
ksp = lat.shape[0]
# Set up initial circle
c0 = circle((u+l)/2,norm(u-l)/2)
c0.xopt = c0.xc
# Bound circle
c0.lbound = bound(c0,Lg,Lh,f,g,H,D)
# Upper bound
c0.ubound = f(c0.xopt)
# Set up circle list
clist = [c0]
cslb = clist[0]
# Update global bounds
U = cslb.ubound
L = cslb.lbound
xopt = cslb.xopt
rad = cslb.r
# Loop counter
itr = 0
timer = time()
if(TimeQP == 1):
qptime = 0
# Debug Output
if (SD == 1):
print('----------------------------')
print('Number of elements: %i') % len(clist)
print('U: %f') % U
print('L: %f') % L
print('Circle radius: %e') % rad
print('----------------------------')
# Set tolerance
if((TolType == 'r')and(abs(U) > float_info.epsilon)):
cutoff = (U - L)/abs(U)
else:
cutoff = U - L
#and((time()-timer) < 3000)
while((cutoff > Tol)and(rad > Rtol)):
# Update iteration count
itr = itr + 1
# Prune list
i = 0
while(i < len(clist)):
if(clist[i].lbound > U):
# Visualise
if(Vis == 1):
drawc(clist[i],'k')
del clist[i]
i=i-1
i = i+1
if(Heur == 0):
# Split circle into more circles
inc = (cslb.r)/sqrt(D) # Increment
rn = (cslb.r)/2 # New radius
xc = cslb.xc # Centre
# Visualise
if(Vis == 1):
drawc(cslb,'k')
clist.remove(cslb) # Remove split circle from list
# Create square spoke configuration
spk = array([p for p in product([-inc,0,inc], repeat=D)])
else:
# Split circle into more circles
inc = cslb.r # Increment
rn = (cslb.r)/2 # New radius
xc = cslb.xc # Centre
# Visualise
if(Vis == 1):
drawc(cslb,'k')
clist.remove(cslb) # Remove split circle from list
# Scale configuration
spk = inc*lat
# List to distribute amongst processes
dlist = []
# Create surrounding circles
for i in range(0,ksp):
# Create centre
xcn = xc + spk[i,:]
# Check if circle exists
nc = 0;
for k in range(0,len(clist)):
if(all(xcn == clist[k].xc)):
nc = 1
# If circle doesn't exist
if(nc == 0):
# Create circle
cn = circle(xcn,rn)
# Time QP Solve
if(TimeQP == 1):
eltime = MPI.Wtime()
mfeas, cn.xopt = mfeasible(cn)
if(TimeQP == 1):
qptime += MPI.Wtime() - eltime
# If circle has a feasible point (i.e. it's feasible)
if(mfeas != 0):
# Add to distribution list
dlist.append(cn)
# Visualise
if(Vis == 1):
drawc(cn,'b')
# Distribute data evenly amongst processes
ihi = 0
trem = len(dlist)
prem = numprocs
req = []
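        # hand each worker an approximately equal share of the new circles to bound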
for p in range(0,numprocs-1):
tproc = int(round(trem/prem))
ilo = ihi + 1
ihi = ihi + tproc
req.append(comm.isend(dlist[ilo-1:ihi], dest=numprocs-1-p, tag=0))
prem = prem - 1
trem = trem - tproc
# Distribute remaining data to self
tproc = int(round(trem/prem))
# Bound each item allocated to self
for k in range(ihi,ihi+tproc):
# Bound circle
dlist[k].ubound = f(dlist[k].xopt)
dlist[k].lbound = bound(dlist[k],Lg,Lh,f,g,H,D)
# Add to circle list
clist.append(dlist[k])
# Gather data back up
for p in range(0,numprocs-1):
# Make sure data has been sent
req[p].Wait()
# Get list of bounded circles from other processes
dlist = comm.recv(source=numprocs-1-p, tag=1)
# Add to clist
clist += dlist
# Find circle c with smallest ubound
cslb = min(clist,key=lambda x: x.ubound)
# Update global feasible upper bound
U = cslb.ubound
xopt = cslb.xopt
rad = cslb.r
# Find circle with smallest lbound
cslb = min(clist,key=lambda x: x.lbound)
L = cslb.lbound
# Set tolerance
if((TolType == 'r')and(abs(U) > float_info.epsilon)):
cutoff = (U - L)/abs(U)
else:
cutoff = U - L
# Debug Output
if (SD == 1):
print('Number of elements: %i') % len(clist)
print('U: %f') % U
print('L: %f') % L
print('Circle radius: %e') % rad
print('----------------------------')
# Output end result
print('Minimum value of %f at (') % f(xopt),
for i in range(0,D-1):
print('%f,') % xopt[i],
print('%f)') % xopt[D-1]
print('found with'),
if((TolType == 'r')and(abs(U) > float_info.epsilon)):
print('relative'),
else:
print('absolute'),
print('tolerance %f in %i iterations.') % (cutoff, itr)
tol = cutoff
xs = xopt
fxs = f(xopt)
print('Elapsed time %f seconds.') % (time()-timer)
if(TimeQP == 1):
print('Time taken for QP solves is %f seconds') % qptime
# Kill worker processes
for p in range(0,numprocs-1):
comm.send(None, dest=numprocs-1-p, tag=0)
# Display figures and wait
if(Vis == 1):
show()
return xs, fxs, tol, itr
# Worker processes
else:
# Idle time
if(TimeWidle == 1):
eltime = MPI.Wtime()
itime = 0
# Pick up scattered list
rlist = comm.recv(source=0, tag=0)
# Idle time
if(TimeWidle == 1):
itime += MPI.Wtime() - eltime
while(rlist != None):
# Bound each item in the list
for k in range(0,len(rlist)):
# Bound circle
rlist[k].ubound = f(rlist[k].xopt)
rlist[k].lbound = bound(rlist[k],Lg,Lh,f,g,H,D)
# Send bounded list
comm.send(rlist, dest=0, tag=1)
# Idle time
if(TimeWidle == 1):
eltime = MPI.Wtime()
# Pick up next scattered list
rlist = comm.recv(source=0, tag=0)
# Idle time
if(TimeWidle == 1):
itime += MPI.Wtime() - eltime
# Output idle time
if(TimeWidle == 1):
print('Processor %i has been idle for %f seconds') % (rank,itime)
return None, None, None, None
| lgpl-3.0 |
maaskola/GPy | GPy/models/mrd.py | 8 | 14617 | # ## Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import itertools, logging
from ..kern import Kern
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..core.parameterization import Param, Parameterized
from ..core.parameterization.observable_array import ObsAr
from ..inference.latent_function_inference.var_dtc import VarDTC
from ..inference.latent_function_inference import InferenceMethodList
from ..likelihoods import Gaussian
from ..util.initialization import initialize_latent
from ..core.sparse_gp import SparseGP, GP
from GPy.core.parameterization.variational import VariationalPosterior
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
class MRD(BayesianGPLVMMiniBatch):
"""
!WARNING: This is bleeding edge code and still in development.
Functionality may change fundamentally during development!
Apply MRD to all given datasets Y in Ylist.
Y_i in [n x p_i]
If Ylist is a dictionary, the keys of the dictionary are the names, and the
values are the different datasets to compare.
The samples n in the datasets need
    to match up, whereas the dimensionality p_i can differ.
:param [array-like] Ylist: List of datasets to apply MRD on
:param input_dim: latent dimensionality
:type input_dim: int
:param array-like X: mean of starting latent space q in [n x q]
:param array-like X_variance: variance of starting latent space q in [n x q]
:param initx: initialisation method for the latent space :
* 'concat' - PCA on concatenation of all datasets
* 'single' - Concatenation of PCA on datasets, respectively
* 'random' - Random draw from a Normal(0,1)
:type initx: ['concat'|'single'|'random']
:param initz: initialisation method for inducing inputs
:type initz: 'permute'|'random'
:param num_inducing: number of inducing inputs to use
:param Z: initial inducing inputs
:param kernel: list of kernels or kernel to copy for each output
:type kernel: [GPy.kernels.kernels] | GPy.kernels.kernels | None (default)
    :param :class:`~GPy.inference.latent_function_inference` inference_method:
        InferenceMethodList of inferences, or one inference method for all
    :param :class:`~GPy.likelihoods.likelihoods.likelihoods` likelihoods: the likelihoods to use
:param str name: the name of this model
:param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None
:param bool|Norm normalizer: How to normalize the data?
:param bool stochastic: Should this model be using stochastic gradient descent over the dimensions?
:param bool|[bool] batchsize: either one batchsize for all, or one batchsize per dataset.
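    Example (a minimal sketch; Y1, Y2 are [n x p_i] arrays sharing their n rows):
        m = MRD([Y1, Y2], input_dim=5, num_inducing=20)
        m.optimize()  # assumed: standard optimisation inherited from the GPy Model base class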
"""
def __init__(self, Ylist, input_dim, X=None, X_variance=None,
initx = 'PCA', initz = 'permute',
num_inducing=10, Z=None, kernel=None,
inference_method=None, likelihoods=None, name='mrd',
Ynames=None, normalizer=False, stochastic=False, batchsize=10):
self.logger = logging.getLogger(self.__class__.__name__)
self.input_dim = input_dim
self.num_inducing = num_inducing
if isinstance(Ylist, dict):
Ynames, Ylist = zip(*Ylist.items())
self.logger.debug("creating observable arrays")
self.Ylist = [ObsAr(Y) for Y in Ylist]
#The next line is a fix for Python 3. It replicates the python 2 behaviour from the above comprehension
Y = Ylist[-1]
if Ynames is None:
self.logger.debug("creating Ynames")
Ynames = ['Y{}'.format(i) for i in range(len(Ylist))]
self.names = Ynames
assert len(self.names) == len(self.Ylist), "one name per dataset, or None if Ylist is a dict"
if inference_method is None:
self.inference_method = InferenceMethodList([VarDTC() for _ in range(len(self.Ylist))])
else:
assert isinstance(inference_method, InferenceMethodList), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(inference_method)
self.inference_method = inference_method
if X is None:
X, fracs = self._init_X(initx, Ylist)
else:
fracs = [X.var(0)]*len(Ylist)
Z = self._init_Z(initz, X)
self.Z = Param('inducing inputs', Z)
self.num_inducing = self.Z.shape[0] # ensure M==N if M>N
# sort out the kernels
self.logger.info("building kernels")
if kernel is None:
from ..kern import RBF
kernels = [RBF(input_dim, ARD=1, lengthscale=1./fracs[i]) for i in range(len(Ylist))]
elif isinstance(kernel, Kern):
kernels = []
for i in range(len(Ylist)):
k = kernel.copy()
kernels.append(k)
else:
assert len(kernel) == len(Ylist), "need one kernel per output"
assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
kernels = kernel
self.variational_prior = NormalPrior()
#self.X = NormalPosterior(X, X_variance)
        if likelihoods is None:
            likelihoods = [Gaussian(name='Gaussian_noise_{}'.format(i)) for i in range(len(Ylist))]
self.logger.info("adding X and Z")
super(MRD, self).__init__(Y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,
Z=self.Z, kernel=None, inference_method=self.inference_method, likelihood=Gaussian(),
name='manifold relevance determination', normalizer=None,
missing_data=False, stochastic=False, batchsize=1)
self._log_marginal_likelihood = 0
self.unlink_parameter(self.likelihood)
self.unlink_parameter(self.kern)
del self.kern
del self.likelihood
self.num_data = Ylist[0].shape[0]
if isinstance(batchsize, int):
batchsize = itertools.repeat(batchsize)
self.bgplvms = []
for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
md = np.isnan(Y).any()
spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance,
Z=Z, kernel=k, likelihood=l,
inference_method=im, name=n,
normalizer=normalizer,
missing_data=md,
stochastic=stochastic,
batchsize=bs)
spgp.kl_factr = 1./len(Ynames)
spgp.unlink_parameter(spgp.Z)
spgp.unlink_parameter(spgp.X)
del spgp.Z
del spgp.X
spgp.Z = self.Z
spgp.X = self.X
self.link_parameter(spgp, i+2)
self.bgplvms.append(spgp)
self.posterior = None
self.logger.info("init done")
def parameters_changed(self):
self._log_marginal_likelihood = 0
self.Z.gradient[:] = 0.
self.X.gradient[:] = 0.
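        # accumulate the marginal likelihood and the shared Z, X gradients over all sub-models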
for b, i in zip(self.bgplvms, self.inference_method):
self._log_marginal_likelihood += b._log_marginal_likelihood
self.logger.info('working on im <{}>'.format(hex(id(i))))
self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
#grad_dict = b.full_values
if self.has_uncertain_inputs():
self.X.gradient += b._Xgrad
else:
self.X.gradient += b._Xgrad
#if self.has_uncertain_inputs():
# # update for the KL divergence
# self.variational_prior.update_gradients_KL(self.X)
# self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
# pass
def log_likelihood(self):
return self._log_marginal_likelihood
def _init_X(self, init='PCA', Ylist=None):
if Ylist is None:
Ylist = self.Ylist
if init in "PCA_concat":
X, fracs = initialize_latent('PCA', self.input_dim, np.hstack(Ylist))
fracs = [fracs]*len(Ylist)
elif init in "PCA_single":
X = np.zeros((Ylist[0].shape[0], self.input_dim))
fracs = []
for qs, Y in zip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
x,frcs = initialize_latent('PCA', len(qs), Y)
X[:, qs] = x
fracs.append(frcs)
else: # init == 'random':
X = np.random.randn(Ylist[0].shape[0], self.input_dim)
fracs = X.var(0)
fracs = [fracs]*len(Ylist)
X -= X.mean()
X /= X.std()
return X, fracs
def _init_Z(self, init="permute", X=None):
if X is None:
X = self.X
if init in "permute":
Z = np.random.permutation(X.copy())[:self.num_inducing]
elif init in "random":
Z = np.random.randn(self.num_inducing, self.input_dim) * X.var()
return Z
def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False):
import matplotlib.pyplot as plt
if axes is None:
fig = plt.figure(num=fignum)
sharex_ax = None
sharey_ax = None
plots = []
for i, g in enumerate(self.bgplvms):
try:
if sharex:
sharex_ax = ax # @UndefinedVariable
sharex = False # dont set twice
if sharey:
sharey_ax = ax # @UndefinedVariable
sharey = False # dont set twice
except:
pass
if axes is None:
ax = fig.add_subplot(1, len(self.bgplvms), i + 1, sharex=sharex_ax, sharey=sharey_ax)
elif isinstance(axes, (tuple, list, np.ndarray)):
ax = axes[i]
else:
raise ValueError("Need one axes per latent dimension input_dim")
plots.append(plotf(i, g, ax))
if sharey_ax is not None:
plt.setp(ax.get_yticklabels(), visible=False)
plt.draw()
if axes is None:
try:
fig.tight_layout()
except:
pass
return plots
def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):
"""
Prediction for data set Yindex[default=0].
This predicts the output mean and variance for the dataset given in Ylist[Yindex]
"""
b = self.bgplvms[Yindex]
self.posterior = b.posterior
self.kern = b.kern
self.likelihood = b.likelihood
return super(MRD, self).predict(Xnew, full_cov, Y_metadata, kern)
#===============================================================================
# TODO: Predict! Maybe even change to several bgplvms, which share an X?
#===============================================================================
# def plot_predict(self, fignum=None, ax=None, sharex=False, sharey=False, **kwargs):
# fig = self._handle_plotting(fignum,
# ax,
# lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs),
# sharex=sharex, sharey=sharey)
# return fig
def plot_scales(self, fignum=None, ax=None, titles=None, sharex=False, sharey=True, *args, **kwargs):
"""
TODO: Explain other parameters
:param titles: titles for axes of datasets
"""
if titles is None:
titles = [r'${}$'.format(name) for name in self.names]
ymax = reduce(max, [np.ceil(max(g.kern.input_sensitivity())) for g in self.bgplvms])
def plotf(i, g, ax):
#ax.set_ylim([0,ymax])
return g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
fig = self._handle_plotting(fignum, ax, plotf, sharex=sharex, sharey=sharey)
return fig
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
"""
see plotting.matplot_dep.dim_reduction_plots.plot_latent
if predict_kwargs is None, will plot latent spaces for 0th dataset (and kernel), otherwise give
predict_kwargs=dict(Yindex='index') for plotting only the latent space of dataset with 'index'.
"""
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from matplotlib import pyplot as plt
from ..plotting.matplot_dep import dim_reduction_plots
if "Yindex" not in predict_kwargs:
predict_kwargs['Yindex'] = 0
Yindex = predict_kwargs['Yindex']
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
else:
fig = ax.figure
self.kern = self.bgplvms[Yindex].kern
self.likelihood = self.bgplvms[Yindex].likelihood
plot = dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
ax.set_title(self.bgplvms[Yindex].name)
try:
fig.tight_layout()
except:
pass
return plot
def __getstate__(self):
state = super(MRD, self).__getstate__()
if 'kern' in state:
del state['kern']
if 'likelihood' in state:
del state['likelihood']
return state
def __setstate__(self, state):
# TODO:
super(MRD, self).__setstate__(state)
self.kern = self.bgplvms[0].kern
self.likelihood = self.bgplvms[0].likelihood
self.parameters_changed()
| bsd-3-clause |
koverholt/bayes-fire | Example_Cases/Correlation_Fire_Location/Scripts/pymc_heat_flux_localization_fds_1000kW.py | 1 | 1027 | #!/usr/bin/env python
import matplotlib
matplotlib.use("Agg")
import pylab as pl
import pymc as mc
import models
import graphics
import data_fds_1000kW
# Generate model
vars = models.point_source_radiation_fds(1000, data_fds_1000kW)
# Fit model with MAP estimates
map = mc.MAP(vars)
map.fit(method='fmin_powell', verbose=2)
# Import model variables and set database options
m = mc.MCMC(
vars,
db='sqlite',
dbname='../Figures/heat_flux_localization_fds_1000kW.sqlite')
# Configure and run MCMC simulation
m.sample(iter=50000, burn=25000, thin=10)
# Plot results
pl.figure()
graphics.plot_ps_radiation_model(m)
pl.savefig('../Figures/heat_flux_localization_fds_1000kW.pdf')
# Plot results in a 3D grid
pl.figure()
graphics.plot_3d_hist(m)
pl.savefig('../Figures/heat_flux_localization_fds_1000kW_3d.pdf')
# Plot resulting distributions and convergence diagnostics
mc.Matplot.plot(m,
format='pdf',
path='../Figures/heat_flux_localization_fds_1000kW')
m.summary()
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # transform only; do not re-fit the scaler on test data
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
466152112/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
smmribeiro/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_xml.py | 1 | 13225 | """Contains methods for building XML structures for interacting with IDE
The methods from this file are used for the debugger interaction. Please note
that Python console now uses Thrift structures with the similar methods
contained in `pydevd_thrift.py` file.
"""
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydev_bundle.pydev_imports import quote
from _pydevd_bundle import pydevd_extension_utils
from _pydevd_bundle import pydevd_resolver
from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, IS_PY3K, \
MAXIMUM_VARIABLE_REPRESENTATION_SIZE, RETURN_VALUES_DICT, LOAD_VALUES_POLICY, DEFAULT_VALUES_DICT
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider
from _pydevd_bundle.pydevd_utils import take_first_n_coll_elements, is_numeric_container, is_pandas_container, is_string, pandas_to_str, \
should_evaluate_full_value, should_evaluate_shape
try:
import types
frame_type = types.FrameType
except:
frame_type = None
def make_valid_xml_value(s):
# Same thing as xml.sax.saxutils.escape but also escaping double quotes.
return s.replace("&", "&").replace('<', '<').replace('>', '>').replace('"', '"')
class ExceptionOnEvaluate:
def __init__(self, result):
self.result = result
_IS_JYTHON = sys.platform.startswith("java")
def _create_default_type_map():
if not _IS_JYTHON:
default_type_map = [
# None means that it should not be treated as a compound variable
            # isinstance does not accept a tuple on some versions of python, so we must declare it expanded
(type(None), None,),
(int, None),
(float, None),
(complex, None),
(str, None),
(tuple, pydevd_resolver.tupleResolver),
(list, pydevd_resolver.tupleResolver),
(dict, pydevd_resolver.dictResolver),
]
try:
default_type_map.append((long, None)) # @UndefinedVariable
except:
pass # not available on all python versions
try:
default_type_map.append((unicode, None)) # @UndefinedVariable
except:
pass # not available on all python versions
try:
default_type_map.append((set, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
default_type_map.append((frozenset, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
from django.utils.datastructures import MultiValueDict
default_type_map.insert(0, (MultiValueDict, pydevd_resolver.multiValueDictResolver))
# we should put it before dict
except:
pass # django may not be installed
try:
from django.forms import BaseForm
default_type_map.insert(0, (BaseForm, pydevd_resolver.djangoFormResolver))
# we should put it before instance resolver
except:
pass # django may not be installed
try:
from collections import deque
default_type_map.append((deque, pydevd_resolver.dequeResolver))
except:
pass
try:
from collections import OrderedDict
default_type_map.insert(0, (OrderedDict, pydevd_resolver.orderedDictResolver))
# we should put it before dict
except:
pass
if frame_type is not None:
default_type_map.append((frame_type, pydevd_resolver.frameResolver))
else:
from org.python import core # @UnresolvedImport
default_type_map = [
(core.PyNone, None),
(core.PyInteger, None),
(core.PyLong, None),
(core.PyFloat, None),
(core.PyComplex, None),
(core.PyString, None),
(core.PyTuple, pydevd_resolver.tupleResolver),
(core.PyList, pydevd_resolver.tupleResolver),
(core.PyDictionary, pydevd_resolver.dictResolver),
(core.PyStringMap, pydevd_resolver.dictResolver),
]
if hasattr(core, 'PyJavaInstance'):
# Jython 2.5b3 removed it.
default_type_map.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
return default_type_map
class TypeResolveHandler(object):
NO_PROVIDER = [] # Sentinel value (any mutable object to be used as a constant would be valid).
def __init__(self):
# Note: don't initialize with the types we already know about so that the extensions can override
# the default resolvers that are already available if they want.
self._type_to_resolver_cache = {}
self._type_to_str_provider_cache = {}
self._initialized = False
def _initialize(self):
self._default_type_map = _create_default_type_map()
self._resolve_providers = pydevd_extension_utils.extensions_of_type(TypeResolveProvider)
self._str_providers = pydevd_extension_utils.extensions_of_type(StrPresentationProvider)
self._initialized = True
def get_type(self, o):
try:
try:
# Faster than type(o) as we don't need the function call.
type_object = o.__class__
except:
# Not all objects have __class__ (i.e.: there are bad bindings around).
type_object = type(o)
type_name = type_object.__name__
except:
# This happens for org.python.core.InitModule
return 'Unable to get Type', 'Unable to get Type', None
return self._get_type(o, type_object, type_name)
def _get_type(self, o, type_object, type_name):
resolver = self._type_to_resolver_cache.get(type_object)
if resolver is not None:
return type_object, type_name, resolver
if not self._initialized:
self._initialize()
try:
for resolver in self._resolve_providers:
if resolver.can_provide(type_object, type_name):
# Cache it
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
for t in self._default_type_map:
if isinstance(o, t[0]):
# Cache it
resolver = t[1]
self._type_to_resolver_cache[type_object] = resolver
return (type_object, type_name, resolver)
except:
traceback.print_exc()
# No match return default (and cache it).
resolver = pydevd_resolver.defaultResolver
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
if _IS_JYTHON:
_base_get_type = _get_type
def _get_type(self, o, type_object, type_name):
if type_name == 'org.python.core.PyJavaInstance':
return type_object, type_name, pydevd_resolver.instanceResolver
if type_name == 'org.python.core.PyArray':
return type_object, type_name, pydevd_resolver.jyArrayResolver
            return self._base_get_type(o, type_object, type_name)
def str_from_providers(self, o, type_object, type_name):
provider = self._type_to_str_provider_cache.get(type_object)
if provider is self.NO_PROVIDER:
return None
if provider is not None:
return provider.get_str(o)
if not self._initialized:
self._initialize()
for provider in self._str_providers:
if provider.can_provide(type_object, type_name):
self._type_to_str_provider_cache[type_object] = provider
return provider.get_str(o)
self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER
return None
_TYPE_RESOLVE_HANDLER = TypeResolveHandler()
"""
def get_type(o):
Receives object and returns a triple (typeObject, typeString, resolver).
resolver != None means that variable is a container, and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects should have a resolver.
"""
get_type = _TYPE_RESOLVE_HANDLER.get_type
_str_from_providers = _TYPE_RESOLVE_HANDLER.str_from_providers
def frame_vars_to_xml(frame_f_locals, hidden_ns=None):
""" dumps frame variables to XML
<var name="var_name" scope="local" type="type" value="value"/>
"""
xml = ""
keys = dict_keys(frame_f_locals)
if hasattr(keys, 'sort'):
keys.sort() # Python 3.0 does not have it
else:
keys = sorted(keys) # Jython 2.1 does not have it
return_values_xml = ''
for k in keys:
try:
v = frame_f_locals[k]
eval_full_val = should_evaluate_full_value(v)
if k == RETURN_VALUES_DICT:
for name, val in dict_iter_items(v):
return_values_xml += var_to_xml(val, name, additional_in_xml=' isRetVal="True"')
else:
if hidden_ns is not None and k in hidden_ns:
xml += var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"',
evaluate_full_value=eval_full_val)
else:
xml += var_to_xml(v, str(k), evaluate_full_value=eval_full_val)
except Exception:
traceback.print_exc()
pydev_log.error("Unexpected error, recovered safely.\n")
# Show return values as the first entry.
return return_values_xml + xml
def var_to_xml(val, name, doTrim=True, additional_in_xml='', evaluate_full_value=True, format='%s'):
""" single variable or dictionary to xml representation """
try:
# This should be faster than isinstance (but we have to protect against not having a '__class__' attribute).
is_exception_on_eval = val.__class__ == ExceptionOnEvaluate
except:
is_exception_on_eval = False
if is_exception_on_eval:
v = val.result
else:
v = val
_type, typeName, resolver = get_type(v)
type_qualifier = getattr(_type, "__module__", "")
if not evaluate_full_value:
value = DEFAULT_VALUES_DICT[LOAD_VALUES_POLICY]
else:
try:
str_from_provider = _str_from_providers(v, _type, typeName)
if str_from_provider is not None:
value = str_from_provider
elif hasattr(v, '__class__'):
if v.__class__ == frame_type:
value = pydevd_resolver.frameResolver.get_frame_name(v)
elif v.__class__ in (list, tuple, set, frozenset, dict):
if len(v) > pydevd_resolver.MAX_ITEMS_TO_HANDLE:
value = '%s' % take_first_n_coll_elements(v, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
value = value.rstrip(')]}') + '...'
else:
value = '%s' % v
else:
value = format % v
else:
value = str(v)
except:
try:
value = repr(v)
except:
value = 'Unable to get repr for %s' % v.__class__
try:
name = quote(name, '/>_= ') # TODO: Fix PY-5834 without using quote
except:
pass
xml = '<var name="%s" type="%s" ' % (make_valid_xml_value(name), make_valid_xml_value(typeName))
if type_qualifier:
xml_qualifier = 'qualifier="%s"' % make_valid_xml_value(type_qualifier)
else:
xml_qualifier = ''
# cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and doTrim:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
# fix to work with unicode values
try:
if not IS_PY3K:
if value.__class__ == unicode: # @UndefinedVariable
value = value.encode('utf-8')
else:
if value.__class__ == bytes:
value = value.encode('utf-8')
except TypeError: # in java, unicode is a function
pass
if is_pandas_container(type_qualifier, typeName, v):
value = pandas_to_str(v, typeName, value, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
xml_value = ' value="%s"' % (make_valid_xml_value(quote(value, '/>_= ')))
xml_shape = ''
try:
if should_evaluate_shape():
if is_numeric_container(type_qualifier, typeName, v):
xml_shape = ' shape="%s"' % make_valid_xml_value(str(v.shape))
elif hasattr(v, '__len__') and not is_string(v):
xml_shape = ' shape="%s"' % make_valid_xml_value("%s" % str(len(v)))
except:
pass
if is_exception_on_eval:
xml_container = ' isErrorOnEval="True"'
else:
if resolver is not None:
xml_container = ' isContainer="True"'
else:
xml_container = ''
return ''.join((xml, xml_qualifier, xml_value, xml_container, xml_shape, additional_in_xml, ' />\n'))
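# Editor's note (illustrative sketch, not part of pydevd): for a plain list
# such as [1, 2, 3] named 'my_list', the string assembled above has roughly
# the shape
#     <var name="my_list" type="list" qualifier="..." value="..." isContainer="True" shape="3" />
# The value text is XML-escaped and URL-quoted, container types (those with a
# resolver) carry isContainer="True", and, when shape evaluation is enabled,
# sized objects also get a shape attribute. Attribute contents shown here are
# indicative only.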
| apache-2.0 |
rvraghav93/scikit-learn | examples/linear_model/plot_ols.py | 74 | 2047 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rseubert/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 8 | 8105 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette
Coefficient. If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 2 <= n_labels <= n_samples-1:
raise ValueError("Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
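# Editor's usage sketch (illustrative, not part of this module; assumes
# scikit-learn's KMeans and make_blobs are available):
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#     from sklearn.metrics import silhouette_score
#
#     X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     print(silhouette_score(X, labels))  # values near 1 indicate well-separated clusters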
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
    Make a 2D grid from two 1-D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
    trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
          Must be even; a power of 2 is most efficient.  The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
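# Editor's usage sketch (illustrative, not part of mlab): estimating the
# spectrum of a noisy 10 Hz sine sampled at Fs = 100 Hz. The parameter
# choices below are arbitrary examples, not recommendations.
#
#     import numpy as np
#     from matplotlib import mlab
#
#     Fs = 100.0
#     t = np.arange(0, 10, 1.0 / Fs)
#     x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(len(t))
#     Pxx, freqs = mlab.psd(x, NFFT=256, Fs=Fs, noverlap=128)
#     # Pxx has a peak near freqs == 10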
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
    length segments and the PSD of each section is computed.  The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned.  If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
        freqs = np.concatenate((freqs[NFFT//2:]-Fs, freqs[:NFFT//2]))
        Pxx = np.concatenate((Pxx[NFFT//2:,:], Pxx[:NFFT//2,:]), 0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
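# Editor's usage sketch (illustrative, not part of mlab): two signals that
# share a 5 Hz component but have independent noise give coherence close to 1
# around 5 Hz and much lower values elsewhere.
#
#     import numpy as np
#     from matplotlib import mlab
#
#     Fs = 100.0
#     t = np.arange(0, 30, 1.0 / Fs)
#     s = np.sin(2 * np.pi * 5 * t)
#     x = s + np.random.randn(len(t))
#     y = s + np.random.randn(len(t))
#     Cxy, f = mlab.cohere(x, y, NFFT=256, Fs=Fs)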
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
p2*x0^2 + p1*x0 + p0 = y1
p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
Method: if *X* is a the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
p = (X_t X)^-1 X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note that the *k*'s and *n*'s in the superscripts and
    subscripts on that page differ from the notation used here.  The
    linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
warnings.warn("use numpy.poyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
    *p* is a vector of polynomial coefficients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
    The Vandermonde matrix of vector *x*.  The *i*-th column of *X* is
the *i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
      the coherence or phase vectors for any (*i*, *j*) key.  E.g.,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*.  In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7GHz Athlon with 512MB RAM compared with
    *preferSpeedOverMemory* = *False*.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
        windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
    normVal = _norm(windowVals)**2
    for i, iCol in enumerate(allColumns):
        progressCallback(i/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y)\\,\\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
"Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
              'x = np.linspace(minx, maxx, N), where N is even'
dx = x[1]-x[0]
f = 1/(N*dx)*np.arange(-N/2, N/2, np.float_)
ind = np.concatenate([np.arange(N/2, N, int),
np.arange(0, N/2, int)])
df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
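# Example usage (illustrative sketch, kept as a comment):
#
#     data = np.arange(100)
#     prctile(data, (25, 50, 75))    # -> array([25, 50, 75])
#     prctile(data, 90)              # scalar p -> a single percentile value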
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
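# Example usage (illustrative sketch, kept as a comment): integrate the scalar
# ODE dy/dt = -y, whose exact solution is exp(-t).
#
#     t = np.linspace(0.0, 5.0, 501)
#     y = rk4(lambda y, t: -y, 1.0, t)
#     np.abs(y - np.exp(-t)).max()    # small for this step size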
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
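# Example usage (illustrative sketch, kept as a comment): evaluate the density
# on a grid, e.g. for a contour plot.
#
#     delta = 0.05
#     x = np.arange(-3.0, 3.0, delta)
#     X, Y = np.meshgrid(x, x)
#     Z = bivariate_normal(X, Y, sigmax=1.0, sigmay=0.5, sigmaxy=0.2)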
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v)
if ( c1 <= 0 ):
return dist(p, s0)
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1)
b = c1 / c2
pb = s0 + b * v
return dist(p, pb)
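# Example usage (illustrative sketch, kept as a comment):
#
#     dist_point_to_segment((0, 1), (-1, 0), (1, 0))   # -> 1.0, foot at (0, 0)
#     dist_point_to_segment((2, 1), (-1, 0), (1, 0))   # -> sqrt(2), nearest end is (1, 0)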
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns:
.. math::
\\lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
Update the *dataLim* to reflect the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
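# Example usage (illustrative sketch, kept as a comment): keep the most recent
# 100 samples of an incoming stream.
#
#     buf = FIFOBuffer(100)
#     for i in range(250):
#         buf.add(float(i), float(i)**2)
#     buf.last()              # -> (249.0, 62001.0), the newest sample
#     x, y = buf.asarrays()   # the last 100 samples, oldest first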
def movavg(x,n):
"""
Compute the length-*n* moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
The data must be regular, with the same number of values in every row.
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
- *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
:func:`slopes` calculates the slope y'(x). Given data vectors X and Y, it
returns Y'(X), i.e. the slope of the curve Y(X). The slope is
estimated using the slope obtained from that of a parabola through
any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases
Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by a original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
extrapolation. The relevance of the data obtained from this is, of
course, questionable...
original implementation by Halldor Bjornsson, Icelandic
Meteorological Office, March 2006 halldor at vedur.is
completely reworked and optimized for Python by Norbert Nemec,
Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
def inside_poly(points, verts):
"""
points is a sequence of x,y points
verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
iterable. If they are iterable, they must be equal in length to x
return value is x, y arrays for use with Axes.fill
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
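# Example usage (illustrative sketch, kept as a comment): build a filled region
# between y=0 and y=sin(x) for Axes.fill.
#
#     x = np.linspace(0, 2*np.pi, 50)
#     xv, yv = poly_between(x, 0.0, np.sin(x))
#     # ax.fill(xv, yv) would shade the area between the two curves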
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin is None:
xfin = xini + 0.0
xini = 0.0
if delta is None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
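# Example usage (illustrative sketch, kept as a comment):
#
#     frange(0.0, 1.0, 0.25)        # -> array([ 0. , 0.25, 0.5 , 0.75, 1. ])
#     frange(0.0, 1.0, npts=5)      # same points, spacing derived from npts
#     frange(3, closed=0)           # -> array([ 0., 1., 2.]), open interval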
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return a square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if number < base:
return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
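# Example usage (illustrative sketch, kept as a comment):
#
#     base_repr(255, 16)    # -> 'FF'
#     base_repr(5, 2)       # -> '101'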
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
if not digits.count (1): return '0'
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
meaning it has at most one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - see :func:`scipy.linalg.sqrtm`
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
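# Example usage (illustrative sketch, kept as a comment): append a single
# field to an existing record array.
#
#     r = np.rec.fromrecords([(1, 2.0), (3, 4.0)], names='a,b')
#     r2 = rec_append_fields(r, 'c', np.array([True, False]))
#     r2.dtype.names    # -> ('a', 'b', 'c')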
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
Return record array has *dtype* names for each attribute name in
the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
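# Example usage (illustrative sketch, kept as a comment; the field names are
# made up for illustration):
#
#     r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 5.0)],
#                            names='key,val')
#     stats = (('val', len, 'nval'), ('val', np.mean, 'avgval'))
#     rec_groupby(r, ('key',), stats)   # one output row per distinct key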
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r2.dtype[name]
assert dt2.type == dt1.type
if dt1.itemsize > dt2.itemsize:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
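# Example usage (illustrative sketch, kept as a comment; field names are made
# up for illustration):
#
#     r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,x')
#     r2 = np.rec.fromrecords([(2, 0.5), (3, 0.7)], names='id,y')
#     rec_join('id', r1, r2)                     # inner join: only id == 2
#     rec_join('id', r1, r2, jointype='outer')   # all ids from both arrays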
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
# objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
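# Example usage (illustrative sketch, kept as a comment; 'data.csv' is a
# hypothetical file with a header row, e.g. "date,price" followed by rows
# like "2008-01-02,12.5"):
#
#     r = csv2rec('data.csv')
#     r.dtype.names        # lower-cased, munged column headers
#     r.date, r.price      # columns converted to dates/floats by inspection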
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
(:mod:`mpl_toolkits.natgrid`) has been created that provides a more
robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by natgridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
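# Example usage (illustrative sketch, kept as a comment): grid scattered data
# onto a regular 50x50 mesh.
#
#     x = np.random.uniform(-1, 1, 200)
#     y = np.random.uniform(-1, 1, 200)
#     z = x * np.exp(-x**2 - y**2)
#     xi = np.linspace(-1, 1, 50)
#     yi = np.linspace(-1, 1, 50)
#     zi = griddata(x, y, z, xi, yi)   # masked outside the convex hull of (x, y)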
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
:func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by a original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
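# Illustrative sketch (not part of the original module): contiguous_regions() on a
# small boolean mask.  The helper name is hypothetical and is not called anywhere.
def _example_contiguous_regions():
    mask = [False, True, True, False, True]
    # returns [(1, 3), (4, 5)]: mask[1:3] and mask[4:5] are the runs of True values
    return contiguous_regions(mask)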
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between a set of successive points in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. The distances between
    successive rows are computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
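# Illustrative sketch (not part of the original module): quad2cubic() applied to a
# quadratic Bezier with control points (0,0), (1,2), (2,0).  The helper name is
# hypothetical and is not called anywhere.
def _example_quad2cubic():
    # Expected result: (0, 0, 2/3, 4/3, 4/3, 4/3, 2, 0) -- the end points are kept
    # and the two inner cubic control points lie 2/3 of the way toward (1, 2).
    return quad2cubic(0.0, 0.0, 1.0, 2.0, 2.0, 0.0)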
| agpl-3.0 |
GoogleCloudPlatform/ml-on-gcp | example_zoo/tensorflow/probability/logistic_regression/trainer/logistic_regression.py | 1 | 8774 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains a Bayesian logistic regression model on synthetic data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from trainer.tfgfile_wrapper import tfgfile_wrapper
import os
# Dependency imports
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
from matplotlib import cm
from matplotlib import figure
from matplotlib.backends import backend_agg
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
flags.DEFINE_float("learning_rate",
default=0.01,
help="Initial learning rate.")
flags.DEFINE_integer("max_steps",
default=1500,
help="Number of training steps to run.")
flags.DEFINE_integer("batch_size",
default=32,
help="Batch size.")
flags.DEFINE_string(
"model_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
"logistic_regression/"),
help="Directory to put the model's fit.")
flags.DEFINE_integer("num_examples",
default=256,
help="Number of datapoints to generate.")
flags.DEFINE_integer("num_monte_carlo",
default=50,
help="Monte Carlo samples to visualize weight posterior.")
FLAGS = flags.FLAGS
def toy_logistic_data(num_examples, input_size=2, weights_prior_stddev=5.0):
"""Generates synthetic data for binary classification.
Args:
num_examples: The number of samples to generate (scalar Python `int`).
input_size: The input space dimension (scalar Python `int`).
weights_prior_stddev: The prior standard deviation of the weight
vector. (scalar Python `float`).
Returns:
random_weights: Sampled weights as a Numpy `array` of shape
`[input_size]`.
random_bias: Sampled bias as a scalar Python `float`.
design_matrix: Points sampled uniformly from the cube `[-1,
1]^{input_size}`, as a Numpy `array` of shape `(num_examples,
input_size)`.
labels: Labels sampled from the logistic model `p(label=1) =
logistic(dot(features, random_weights) + random_bias)`, as a Numpy
`int32` `array` of shape `(num_examples, 1)`.
"""
random_weights = weights_prior_stddev * np.random.randn(input_size)
random_bias = np.random.randn()
design_matrix = np.random.rand(num_examples, input_size) * 2 - 1
logits = np.reshape(
np.dot(design_matrix, random_weights) + random_bias,
(-1, 1))
p_labels = 1. / (1 + np.exp(-logits))
labels = np.int32(p_labels > np.random.rand(num_examples, 1))
return random_weights, random_bias, np.float32(design_matrix), labels
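# Illustrative sketch (not part of the original script): a hypothetical helper
# showing the shapes returned by toy_logistic_data() for a tiny draw.  It is
# never invoked by the training pipeline below.
def _example_toy_logistic_data():
  weights, bias, design_matrix, labels = toy_logistic_data(num_examples=4,
                                                           input_size=2)
  # weights.shape == (2,), design_matrix.shape == (4, 2), labels.shape == (4, 1),
  # and every label entry is either 0 or 1.
  return weights, bias, design_matrix, labels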
@tfgfile_wrapper
def visualize_decision(features, labels, true_w_b, candidate_w_bs, fname):
"""Utility method to visualize decision boundaries in R^2.
Args:
features: Input points, as a Numpy `array` of shape `[num_examples, 2]`.
labels: Numpy `float`-like array of shape `[num_examples, 1]` giving a
label for each point.
true_w_b: A `tuple` `(w, b)` where `w` is a Numpy array of
shape `[2]` and `b` is a scalar `float`, interpreted as a
decision rule of the form `dot(features, w) + b > 0`.
candidate_w_bs: Python `iterable` containing tuples of the same form as
true_w_b.
fname: The filename to save the plot as a PNG image (Python `str`).
"""
fig = figure.Figure(figsize=(6, 6))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
ax.scatter(features[:, 0], features[:, 1],
c=np.float32(labels[:, 0]),
cmap=cm.get_cmap("binary"),
edgecolors="k")
def plot_weights(w, b, **kwargs):
w1, w2 = w
x1s = np.linspace(-1, 1, 100)
x2s = -(w1 * x1s + b) / w2
ax.plot(x1s, x2s, **kwargs)
for w, b in candidate_w_bs:
plot_weights(w, b,
alpha=1./np.sqrt(len(candidate_w_bs)),
lw=1, color="blue")
if true_w_b is not None:
plot_weights(*true_w_b, lw=4,
color="green", label="true separator")
ax.set_xlim([-1.5, 1.5])
ax.set_ylim([-1.5, 1.5])
ax.legend()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
def build_input_pipeline(x, y, batch_size):
"""Build a Dataset iterator for supervised classification.
Args:
x: Numpy `array` of features, indexed by the first dimension.
y: Numpy `array` of labels, with the same first dimension as `x`.
batch_size: Number of elements in each training batch.
Returns:
batch_features: `Tensor` feed features, of shape
`[batch_size] + x.shape[1:]`.
batch_labels: `Tensor` feed of labels, of shape
`[batch_size] + y.shape[1:]`.
"""
training_dataset = tf.data.Dataset.from_tensor_slices((x, y))
training_batches = training_dataset.repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
batch_features, batch_labels = training_iterator.get_next()
return batch_features, batch_labels
def main(argv):
del argv # unused
if tf.io.gfile.exists(FLAGS.model_dir):
tf.compat.v1.logging.warning(
"Warning: deleting old log directory at {}".format(FLAGS.model_dir))
tf.io.gfile.rmtree(FLAGS.model_dir)
tf.io.gfile.makedirs(FLAGS.model_dir)
# Generate (and visualize) a toy classification dataset.
w_true, b_true, x, y = toy_logistic_data(FLAGS.num_examples, 2)
features, labels = build_input_pipeline(x, y, FLAGS.batch_size)
# Define a logistic regression model as a Bernoulli distribution
# parameterized by logits from a single linear layer. We use the Flipout
# Monte Carlo estimator for the layer: this enables lower variance
# stochastic gradients than naive reparameterization.
with tf.compat.v1.name_scope("logistic_regression", values=[features]):
layer = tfp.layers.DenseFlipout(
units=1,
activation=None,
kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(),
bias_posterior_fn=tfp.layers.default_mean_field_normal_fn())
logits = layer(features)
labels_distribution = tfd.Bernoulli(logits=logits)
# Compute the -ELBO as the loss, averaged over the batch size.
neg_log_likelihood = -tf.reduce_mean(
input_tensor=labels_distribution.log_prob(labels))
kl = sum(layer.losses) / FLAGS.num_examples
elbo_loss = neg_log_likelihood + kl
# Build metrics for evaluation. Predictions are formed from a single forward
# pass of the probabilistic layers. They are cheap but noisy predictions.
predictions = tf.cast(logits > 0, dtype=tf.int32)
accuracy, accuracy_update_op = tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions)
with tf.compat.v1.name_scope("train"):
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate)
train_op = optimizer.minimize(elbo_loss)
init_op = tf.group(tf.compat.v1.global_variables_initializer(),
tf.compat.v1.local_variables_initializer())
with tf.compat.v1.Session() as sess:
sess.run(init_op)
# Fit the model to data.
for step in range(FLAGS.max_steps):
_ = sess.run([train_op, accuracy_update_op])
if step % 100 == 0:
loss_value, accuracy_value = sess.run([elbo_loss, accuracy])
print("Step: {:>3d} Loss: {:.3f} Accuracy: {:.3f}".format(
step, loss_value, accuracy_value))
# Visualize some draws from the weights posterior.
w_draw = layer.kernel_posterior.sample()
b_draw = layer.bias_posterior.sample()
candidate_w_bs = []
for _ in range(FLAGS.num_monte_carlo):
w, b = sess.run((w_draw, b_draw))
candidate_w_bs.append((w, b))
visualize_decision(x, y, (w_true, b_true),
candidate_w_bs,
fname=os.path.join(FLAGS.model_dir,
"weights_inferred.png"))
if __name__ == "__main__":
tf.compat.v1.app.run()
| apache-2.0 |
videlec/sage-flatsurf | flatsurf/geometry/straight_line_trajectory.py | 1 | 31149 | from __future__ import absolute_import, print_function, division
from six.moves import range, map, filter, zip
from six import iteritems
from collections import deque, defaultdict
from .polygon import is_same_direction, line_intersection
from .surface_objects import SaddleConnection
# Vincent question:
# using deque has the disadvantage of losing the initial points
# ideally doing
# my_line[i]
# we should always access the same element
# I wanted to be able to flow backward thus inserting at the beginning of a list.
# Perhaps it would be better to model this on a deque-like class that is indexed by
# all integers rather than just the non-negative ones? Do you know of such
# a class? Alternately, we could store an offset.
def get_linearity_coeff(u, v):
r"""
Given the two 2-dimensional vectors ``u`` and ``v``, return ``a`` so that
``v = a*u``
If the vectors are not colinear, a ``ValueError`` is raised.
EXAMPLES::
sage: from flatsurf.geometry.straight_line_trajectory import get_linearity_coeff
sage: V = VectorSpace(QQ,2)
sage: get_linearity_coeff(V((1,0)), V((2,0)))
2
sage: get_linearity_coeff(V((2,0)), V((1,0)))
1/2
sage: get_linearity_coeff(V((0,1)), V((0,2)))
2
sage: get_linearity_coeff(V((0,2)), V((0,1)))
1/2
sage: get_linearity_coeff(V((1,2)), V((-2,-4)))
-2
sage: get_linearity_coeff(V((1,1)), V((-1,1)))
Traceback (most recent call last):
...
ValueError: non colinear
"""
if u[0]:
a = v[0]/u[0]
if v[1] != a*u[1]:
raise ValueError("non colinear")
return a
elif v[0]:
raise ValueError("non colinear")
elif u[1]:
return v[1]/u[1]
else:
raise ValueError("zero vector")
class SegmentInPolygon:
r"""
Maximal segment in a polygon of a similarity surface
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import SegmentInPolygon
sage: s = similarity_surfaces.example()
sage: v = s.tangent_vector(0, (1/3,-1/4), (0,1))
sage: SegmentInPolygon(v)
Segment in polygon 0 starting at (1/3, -1/3) and ending at (1/3, 0)
"""
def __init__(self, start, end=None):
if not end is None:
# WARNING: here we assume that both start and end are on the
# boundary
self._start = start
self._end = end
else:
self._end = start.forward_to_polygon_boundary()
self._start = self._end.forward_to_polygon_boundary()
def __eq__(self, other):
return type(self) is type(other) and \
self._start == other._start and \
self._end == other._end
def __ne__(self, other):
return type(self) is not type(other) or \
self._start != other._start or \
self._end != other._end
def __repr__(self):
r"""
TESTS::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import SegmentInPolygon
sage: s = similarity_surfaces.example()
sage: v = s.tangent_vector(0, (0,0), (3,-1))
sage: SegmentInPolygon(v)
Segment in polygon 0 starting at (0, 0) and ending at (2, -2/3)
"""
return "Segment in polygon {} starting at {} and ending at {}".format(
self.polygon_label(), self.start().point(), self.end().point())
def start(self):
r"""
Return the tangent vector associated to the start of a trajectory pointed forward.
"""
return self._start
def start_is_singular(self):
return self._start.is_based_at_singularity()
def end(self):
r"""
Return a TangentVector associated to the end of a trajectory, pointed backward.
"""
return self._end
def end_is_singular(self):
return self._end.is_based_at_singularity()
def is_edge(self):
if not self.start_is_singular() or not self.end_is_singular():
return False
vv=self.start().vector()
vertex=self.start().vertex()
ww=self.start().polygon().edge(vertex)
from flatsurf.geometry.polygon import is_same_direction
return is_same_direction(vv,ww)
def edge(self):
if not self.is_edge():
raise ValueError("Segment asked for edge when not an edge")
return self.start().vertex()
def polygon_label(self):
return self._start.polygon_label()
def invert(self):
return SegmentInPolygon(self._end, self._start)
def next(self):
r"""
Return the next segment obtained by continuing straight through the end point.
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import SegmentInPolygon
sage: s = similarity_surfaces.example()
sage: s.polygon(0)
Polygon: (0, 0), (2, -2), (2, 0)
sage: s.polygon(1)
Polygon: (0, 0), (2, 0), (1, 3)
sage: v = s.tangent_vector(0, (0,0), (3,-1))
sage: seg = SegmentInPolygon(v)
sage: seg
Segment in polygon 0 starting at (0, 0) and ending at (2, -2/3)
sage: seg.next()
Segment in polygon 1 starting at (2/3, 2) and ending at (14/9, 4/3)
"""
if self.end_is_singular():
raise ValueError("Cannot continue from singularity")
return SegmentInPolygon(self._end.invert())
def previous(self):
        if self.start_is_singular():
raise ValueError("Cannot continue from singularity")
return SegmentInPolygon(self._start.invert()).invert()
# DEPRECATED STUFF THAT WILL BE REMOVED
def start_point(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use start_point but start().point()")
return self._start.point()
def start_direction(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use start_direction but start().vector()")
return self._start.vector()
def end_point(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use end_point but end().point()")
return self._end.point()
def end_direction(self):
from sage.misc.superseded import deprecation
deprecation(1, "do not use end_direction but end().vector()")
return self._end.vector()
class AbstractStraightLineTrajectory:
r"""
You need to implement:
- ``def segment(self, i)``
- ``def segments(self)``
"""
def surface(self):
raise NotImplementedError
def __repr__(self):
start = self.segment(0).start()
end = self.segment(-1).end()
return "Straight line trajectory made of {} segments from {} in polygon {} to {} in polygon {}".format(
self.combinatorial_length(),
start.point(), start.polygon_label(),
end.point(), end.polygon_label())
def plot(self, *args, **options):
r"""
Plot this trajectory by converting to a graphical trajectory.
If any arguments are provided in `*args` it must be only one argument containing a GraphicalSurface.
The keyword arguments in `**options` are passed on to :func:`GraphicalStraightLineTrajectory.plot`.
EXAMPLES::
sage: from flatsurf import *
sage: T = translation_surfaces.square_torus()
sage: v = T.tangent_vector(0, (0,0), (5,7))
sage: L = v.straight_line_trajectory()
sage: L.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 1 graphics primitive
sage: L.plot(color='red') # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 1 graphics primitive
"""
if len(args) > 1:
raise ValueError("SimilaritySurface.plot() can take at most one non-keyword argument.")
if len(args)==1:
from flatsurf.graphical.surface import GraphicalSurface
if not isinstance(args[0], GraphicalSurface):
raise ValueError("If an argument is provided, it must be a GraphicalSurface.")
return self.graphical_trajectory(graphical_surface = args[0]).plot(**options)
return self.graphical_trajectory().plot(**options)
def graphical_trajectory(self, graphical_surface=None, **options):
r"""
Returns a ``GraphicalStraightLineTrajectory`` corresponding to this
trajectory in the provided ``GraphicalSurface``.
"""
from flatsurf.graphical.straight_line_trajectory import GraphicalStraightLineTrajectory
if graphical_surface is None:
graphical_surface = self.surface().graphical_surface()
return GraphicalStraightLineTrajectory(self, graphical_surface, **options)
def cylinder(self):
r"""
If this is a closed orbit, return the associated maximal cylinder.
Raises a ValueError if this trajectory is not closed.
EXAMPLES::
sage: from flatsurf import *
sage: s = translation_surfaces.regular_octagon()
sage: v = s.tangent_vector(0,(1/2,0),(sqrt(2),1))
sage: traj = v.straight_line_trajectory()
sage: traj.flow(4)
sage: traj.is_closed()
True
sage: cyl = traj.cylinder()
sage: cyl.area() # a = sqrt(2)
a + 1
sage: cyl.holonomy()
(3*a + 4, 2*a + 3)
sage: cyl.edges()
(2, 3, 3, 2, 4)
"""
# Note may not be defined.
if not self.is_closed():
raise ValueError("Cylinder is only defined for closed straight-line trajectories.")
from .surface_objects import Cylinder
coding = self.coding()
label = coding[0][0]
edges = [ e for l,e in coding[1:] ]
edges.append(self.surface().opposite_edge(coding[0][0],coding[0][1])[1])
return Cylinder(self.surface(), label, edges)
def coding(self, alphabet=None):
r"""
Return the coding of this trajectory with respect to the sides of the
polygons
INPUT:
- ``alphabet`` -- an optional dictionary ``(lab,nb) -> letter``. If some
labels are avoided then these crossings are ignored.
EXAMPLES::
sage: from flatsurf import *
sage: t = translation_surfaces.square_torus()
sage: v = t.tangent_vector(0, (1/2,0), (5,6))
sage: l = v.straight_line_trajectory()
sage: alphabet = {(0,0): 'a', (0,1): 'b', (0,2):'a', (0,3): 'b'}
sage: l.coding()
[(0, 0), (0, 1)]
sage: l.coding(alphabet)
['a', 'b']
sage: l.flow(10); l.flow(-10)
sage: l.coding()
[(0, 2), (0, 1), (0, 2), (0, 1), (0, 2), (0, 1), (0, 2), (0, 1), (0, 2)]
sage: print(''.join(l.coding(alphabet)))
ababababa
sage: v = t.tangent_vector(0, (1/2,0), (7,13))
sage: l = v.straight_line_trajectory()
sage: l.flow(10); l.flow(-10)
sage: print(''.join(l.coding(alphabet)))
aabaabaababaabaabaab
For a closed trajectory, the last label (corresponding also to the
starting point) is not considered::
sage: v = t.tangent_vector(0, (1/5,1/7), (1,1))
sage: l = v.straight_line_trajectory()
sage: l.flow(10)
sage: l.is_closed()
True
sage: l.coding(alphabet)
['a', 'b']
Check that the saddle connections that are obtained in the torus get the
expected coding::
sage: for _ in range(10):
....: x = ZZ.random_element(1,30)
....: y = ZZ.random_element(1,30)
....: x,y = x/gcd(x,y), y/gcd(x,y)
....: v = t.tangent_vector(0, (0,0), (x,y))
....: l = v.straight_line_trajectory()
....: l.flow(200); l.flow(-200)
....: w = ''.join(l.coding(alphabet))
....: assert Word(w+'ab'+w).is_balanced()
....: assert Word(w+'ba'+w).is_balanced()
....: assert w.count('a') == y-1
....: assert w.count('b') == x-1
"""
ans = []
segments = self.segments()
s = segments[0]
start = s.start()
if start._position._position_type == start._position.EDGE_INTERIOR:
p = s.polygon_label()
e = start._position.get_edge()
lab = (p,e) if alphabet is None else alphabet.get((p,e))
if lab is not None:
ans.append(lab)
for i in range(len(segments)-1):
s = segments[i]
end = s.end()
p = s.polygon_label()
e = end._position.get_edge()
lab = (p,e) if alphabet is None else alphabet.get((p,e))
if lab is not None:
ans.append(lab)
s = segments[-1]
end = s.end()
if end._position._position_type == end._position.EDGE_INTERIOR and \
end.invert() != start:
p = s.polygon_label()
e = end._position.get_edge()
lab = (p,e) if alphabet is None else alphabet.get((p,e))
if lab is not None:
ans.append(lab)
return ans
def initial_tangent_vector(self):
return self.segment(0).start()
def terminal_tangent_vector(self):
return self.segment(-1).end()
def intersects(self, traj, count_singularities = False):
r"""
Return true if this trajectory intersects the other trajectory.
"""
try:
next(self.intersections(traj, count_singularities = count_singularities))
except StopIteration:
return False
return True
def intersections(self, traj, count_singularities = False, include_segments = False):
r"""
Return the set of SurfacePoints representing the intersections
of this trajectory with the provided trajectory or SaddleConnection.
Singularities will be included only if count_singularities is
set to True.
If include_segments is True, it iterates over triples consisting of the SurfacePoint,
and two sets. The first set consists of segments of this trajectory that contain the point
and the second set consists of segments of traj that contain the point.
EXAMPLES::
sage: from flatsurf import *
sage: s=translation_surfaces.square_torus()
sage: traj1 = s.tangent_vector(0,(1/2,0),(1,1)).straight_line_trajectory()
sage: traj1.flow(3)
sage: traj1.is_closed()
True
sage: traj2 = s.tangent_vector(0,(1/2,0),(-1,1)).straight_line_trajectory()
sage: traj2.flow(3)
sage: traj2.is_closed()
True
sage: sum(1 for _ in traj1.intersections(traj2))
2
"""
# Partition the segments making up the trajectories by label.
if isinstance(traj,SaddleConnection):
traj = traj.trajectory()
lab_to_seg1 = {}
for seg1 in self.segments():
label = seg1.polygon_label()
if label in lab_to_seg1:
lab_to_seg1[label].append(seg1)
else:
lab_to_seg1[label] = [seg1]
lab_to_seg2 = {}
for seg2 in traj.segments():
label = seg2.polygon_label()
if label in lab_to_seg2:
lab_to_seg2[label].append(seg2)
else:
lab_to_seg2[label] = [seg2]
intersection_points = set()
if include_segments:
segments={}
for label,seg_list_1 in iteritems(lab_to_seg1):
if label in lab_to_seg2:
seg_list_2 = lab_to_seg2[label]
for seg1 in seg_list_1:
for seg2 in seg_list_2:
x = line_intersection(seg1.start().point(),
seg1.start().point()+seg1.start().vector(),
seg2.start().point(),
seg2.start().point()+seg2.start().vector())
if x is not None:
pos = self._s.polygon(seg1.polygon_label()).get_point_position(x)
if pos.is_inside() and (count_singularities or not pos.is_vertex()):
new_point = self._s.surface_point(seg1.polygon_label(),x)
if new_point not in intersection_points:
intersection_points.add(new_point)
if include_segments:
segments[new_point]=({seg1},{seg2})
else:
yield new_point
elif include_segments:
                                    segments[new_point][0].add(seg1)
                                    segments[new_point][1].add(seg2)
if include_segments:
for x in iteritems(segments):
yield x
class StraightLineTrajectory(AbstractStraightLineTrajectory):
r"""
Straight-line trajectory in a similarity surface.
EXAMPLES::
# Demonstrate the handling of edges
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import StraightLineTrajectory
sage: p = SymmetricGroup(2)('(1,2)')
sage: s = translation_surfaces.origami(p,p)
sage: traj = StraightLineTrajectory(s.tangent_vector(1,(0,0),(1,0)))
sage: traj
Straight line trajectory made of 1 segments from (0, 0) in polygon 1 to (1, 1) in polygon 2
sage: traj.is_saddle_connection()
True
sage: traj2 = StraightLineTrajectory(s.tangent_vector(1,(0,0),(0,1)))
sage: traj2
Straight line trajectory made of 1 segments from (1, 0) in polygon 2 to (0, 1) in polygon 1
sage: traj2.is_saddle_connection()
True
"""
def __init__(self, tangent_vector):
self._segments = deque()
seg = SegmentInPolygon(tangent_vector)
self._segments.append(seg)
self._setup_forward()
self._setup_backward()
self._s=tangent_vector.surface()
def surface(self):
return self._s
def segment(self, i):
r"""
EXAMPLES::
sage: from flatsurf import *
sage: O = translation_surfaces.regular_octagon()
sage: v = O.tangent_vector(0, (1,1), (33,45))
sage: L = v.straight_line_trajectory()
sage: L.segment(0)
Segment in polygon 0 starting at (4/15, 0) and ending at (11/26*a +
1, 15/26*a + 1)
sage: L.flow(-1)
sage: L.segment(0)
Segment in polygon 0 starting at (-1/2*a, 7/22*a + 7/11) and ending
at (4/15, a + 1)
sage: L.flow(1)
sage: L.segment(2)
Segment in polygon 0 starting at (-1/13*a, 1/13*a) and ending at
(9/26*a + 11/13, 17/26*a + 15/13)
"""
return self.segments()[i]
def combinatorial_length(self):
return len(self.segments())
def segments(self):
return self._segments
def _setup_forward(self):
v = self.terminal_tangent_vector()
if v.is_based_at_singularity():
self._forward = None
else:
self._forward = v.invert()
def _setup_backward(self):
v = self.initial_tangent_vector()
if v.is_based_at_singularity():
self._backward = None
else:
self._backward = v.invert()
def is_forward_separatrix(self):
return self._forward is None
def is_backward_separatrix(self):
return self._backward is None
def is_saddle_connection(self):
return (self._forward is None) and (self._backward is None)
def is_closed(self):
r"""
Test whether this is a closed trajectory.
By convention, by a closed trajectory we mean a trajectory without any
singularities.
.. SEEALSO::
:meth:`is_saddle_connection`
EXAMPLES:
An example in a cone surface covered by the torus::
sage: from flatsurf import *
sage: p = polygons.square()
sage: s = Surface_list(base_ring=p.base_ring())
sage: s.add_polygon(p,[(0,3),(0,2),(0,1),(0,0)])
0
sage: s.set_immutable()
sage: t = RationalConeSurface(s)
sage: v = t.tangent_vector(0, (1/2,0), (1/3,7/5))
sage: l = v.straight_line_trajectory()
sage: l.is_closed()
False
sage: l.flow(100)
sage: l.is_closed()
True
sage: v = t.tangent_vector(0, (1/2,0), (1/3,2/5))
sage: l = v.straight_line_trajectory()
sage: l.flow(100)
sage: l.is_closed()
False
sage: l.is_saddle_connection()
False
sage: l.flow(-100)
sage: l.is_saddle_connection()
True
"""
return (not self.is_forward_separatrix()) and \
self._forward.differs_by_scaling(self.initial_tangent_vector())
def flow(self, steps):
r"""
        Append or prepend segments to the trajectory.
If steps is positive, attempt to append this many segments.
If steps is negative, attempt to prepend this many segments.
        Will fail gracefully if the trajectory hits a singularity or closes up.
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: v = s.tangent_vector(0, (1,-1/2), (3,-1))
sage: traj = v.straight_line_trajectory()
sage: traj
Straight line trajectory made of 1 segments from (1/4, -1/4) in polygon 0 to (2, -5/6) in polygon 0
sage: traj.flow(1)
sage: traj
Straight line trajectory made of 2 segments from (1/4, -1/4) in polygon 0 to (61/36, 11/12) in polygon 1
sage: traj.flow(-1)
sage: traj
Straight line trajectory made of 3 segments from (15/16, 45/16) in polygon 1 to (61/36, 11/12) in polygon 1
"""
while steps>0 and \
(not self.is_forward_separatrix()) and \
(not self.is_closed()):
self._segments.append(SegmentInPolygon(self._forward))
self._setup_forward()
steps -= 1
while steps<0 and \
(not self.is_backward_separatrix()) and \
(not self.is_closed()):
self._segments.appendleft(SegmentInPolygon(self._backward).invert())
self._setup_backward()
steps += 1
class StraightLineTrajectoryTranslation(AbstractStraightLineTrajectory):
r"""
Straight line trajectory in a translation surface.
This is similar to :class:`StraightLineTrajectory` but implemented using
interval exchange maps. It should be faster than the implementation via
segments and flowing in polygons.
This class only stores a list of triples ``(p, e, x)`` where:
- ``p`` is a label of a polygon
- ``e`` is the number of some edge in ``p``
- ``x`` is the position of the point in ``e`` (be careful that it is not
necessarily a number between 0 and 1. It is given relatively to the length
of the induced interval in the iet)
(see the methods :meth:`_prev` and :meth:`_next`)
"""
def __init__(self, tangent_vector):
t = tangent_vector.polygon_label()
self._vector = tangent_vector.vector()
self._s = tangent_vector.surface()
seg = SegmentInPolygon(tangent_vector)
if seg.is_edge():
self._points = None
self._edge = seg
return
start = seg.start()
pos = start._position
if pos._position_type == pos.EDGE_INTERIOR:
i = pos.get_edge()
elif pos._position_type == pos.VERTEX:
i = pos.get_vertex()
else:
raise RuntimeError("PROBLEM!")
p = start.polygon_label()
poly = self._s.polygon(p)
T = self._get_iet(p)
x = get_linearity_coeff(poly.vertex(i+1) - poly.vertex(i),
start.point() - poly.vertex(i))
x *= T.length_bot(i)
self._points = deque() # we store triples (lab, edge, rel_pos)
self._points.append((p, i, x))
def _next(self, p, e, x):
r"""
Return the image of ``(p, e, x)``
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import StraightLineTrajectoryTranslation
sage: S = SymmetricGroup(3)
sage: r = S('(1,2)')
sage: u = S('(1,3)')
sage: o = translation_surfaces.origami(r,u)
sage: v = o.tangent_vector(1, (1/3,1/7), (5,13))
sage: L = StraightLineTrajectoryTranslation(v)
sage: t0 = (1,0,1/3)
sage: t1 = L._next(*t0)
sage: t2 = L._next(*t1)
sage: t0,t1,t2
((1, 0, 1/3), (3, 0, 16/3), (1, 0, 31/3))
sage: assert L._previous(*t2) == t1
sage: assert L._previous(*t1) == t0
"""
e, x = self._get_iet(p).forward_image(e, x)
p, e = self._s.opposite_edge(p, e)
return (p, e, x)
def _previous(self, p, e, x):
r"""
Return the preimage of ``(p, e, x)``
"""
p, e = self._s.opposite_edge(p, e)
e, x = self._get_iet(p).backward_image(e, x)
return (p, e, x)
def combinatorial_length(self):
if self._points is None:
return 1
return len(self._points)
def _get_iet(self, label):
polygon = self._s.polygon(label)
try:
return self._iets[polygon]
except AttributeError:
self._iets = {polygon: polygon.flow_map(self._vector)}
except KeyError:
self._iets[polygon] = polygon.flow_map(self._vector)
return self._iets[polygon]
def segment(self, i):
r"""
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import StraightLineTrajectoryTranslation
sage: O = translation_surfaces.regular_octagon()
sage: v = O.tangent_vector(0, (1,1), (33,45))
sage: L = StraightLineTrajectoryTranslation(v)
sage: L.segment(0)
Segment in polygon 0 starting at (4/15, 0) and ending at (11/26*a +
1, 15/26*a + 1)
sage: L.flow(-1)
sage: L.segment(0)
Segment in polygon 0 starting at (-1/2*a, 7/22*a + 7/11) and ending
at (4/15, a + 1)
sage: L.flow(1)
sage: L.segment(2)
Segment in polygon 0 starting at (-1/13*a, 1/13*a) and ending at
(9/26*a + 11/13, 17/26*a + 15/13)
"""
if self._points is None:
return self._edge
lab, e0, x0 = self._points[i]
iet = self._get_iet(lab)
e1, x1 = iet.forward_image(e0, x0)
poly = self._s.polygon(lab)
l0 = iet.length_bot(e0)
l1 = iet.length_top(e1)
point0 = poly.vertex(e0) + poly.edge(e0) * x0/l0
point1 = poly.vertex(e1) + poly.edge(e1) * (l1-x1)/l1
v0 = self._s.tangent_vector(lab, point0, self._vector, ring=self._vector.base_ring())
v1 = self._s.tangent_vector(lab, point1, -self._vector, ring=self._vector.base_ring())
return SegmentInPolygon(v0,v1)
def segments(self):
r"""
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import StraightLineTrajectoryTranslation
sage: s = translation_surfaces.square_torus()
sage: v = s.tangent_vector(0, (0,0), (1,1+AA(5).sqrt()), ring=AA)
sage: L = StraightLineTrajectoryTranslation(v)
sage: L.flow(2)
sage: L.segments()
[Segment in polygon 0 starting at (0, 0) and ending at (0.3090169943749474?, 1),
Segment in polygon 0 starting at (0.3090169943749474?, 0) and ending at (0.618033988749895?, 1),
Segment in polygon 0 starting at (0.618033988749895?, 0) and ending at (0.9270509831248423?, 1)]
"""
return [self.segment(i) for i in range(self.combinatorial_length())]
def is_closed(self):
if self._points is None:
raise NotImplementedError
return self._points[0] == self._next(*self._points[-1])
def is_forward_separatrix(self):
if self._points is None:
return True
p1,e1,x1 = self._next(*self._points[-1])
return x1.is_zero()
def is_backward_separatrix(self):
return self._points is None or self._points[0][2].is_zero()
def is_saddle_connection(self):
r"""
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.straight_line_trajectory import StraightLineTrajectoryTranslation
sage: torus = translation_surfaces.square_torus()
sage: v = torus.tangent_vector(0, (1/2,1/2), (1,1))
sage: S = StraightLineTrajectoryTranslation(v)
sage: S.is_saddle_connection()
True
sage: v = torus.tangent_vector(0, (1/3,2/3), (1,2))
sage: S = StraightLineTrajectoryTranslation(v)
sage: S.is_saddle_connection()
False
sage: S.flow(1)
sage: S.is_saddle_connection()
True
"""
return self._points is None or (self.is_forward_separatrix() and self.is_backward_separatrix())
def flow(self, steps):
if self._points is None:
return
if steps > 0:
t = self._points[-1]
for i in range(steps):
t = self._next(*t)
if t == self._points[0] or t[2].is_zero():
break
self._points.append(t)
elif steps < 0:
t = self._points[0]
for i in range(-steps):
if t[2].is_zero():
break
t = self._previous(*t)
if t == self._points[-1]:
# closed curve or backward separatrix
break
self._points.appendleft(t)
| gpl-2.0 |
mlyundin/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
treycausey/scikit-learn | examples/neighbors/plot_species_kde.py | 9 | 4043 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.startswith('micro') for d in data['train']['species']],
dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
xiaolonw/fast-rcnn-backup | lib/roi_data_layer/minibatch.py | 3 | 7887 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from IPython.core.debugger import Tracer
def get_minibatch(roidb, num_classes, num_data):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_scales = None; im_blob = [];
for data_i in xrange(num_data):
im_blob_i, im_scales_i = _get_image_blob(roidb, random_scale_inds, data_i)
if im_scales is None:
im_scales = im_scales_i
else:
assert(im_scales == im_scales_i), 'im_scales are different between different data sources'
im_blob.append(im_blob_i)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs = {'data': im_blob[0],
'rois': rois_blob,
'labels': labels_blob}
for i in xrange(1, num_data):
blobs['data_{:d}'.format(i)] = im_blob[i]
assert(im_blob[0].shape == im_blob[i].shape), "images do not have the same size"
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights
def _get_image_blob(roidb, scale_inds, data_i):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'][data_i])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
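# Illustrative sketch (not part of the original code): how a compact row of the
# form (class, tx, ty, tw, th) expands into the 4*K layout, assuming 3 classes.
# Only the 4 columns belonging to class 1 receive non-zero targets and weights.
# The helper name is hypothetical and is never called; like the code above it
# assumes a NumPy version that tolerates float-valued class indices.
def _example_expand_bbox_targets():
    compact = np.array([[1., 0.1, 0.2, 0.3, 0.4],   # RoI assigned to class 1
                        [0., 0.0, 0.0, 0.0, 0.0]])  # background RoI
    targets, weights = _get_bbox_regression_labels(compact, num_classes=3)
    # targets[0, 4:8] == [0.1, 0.2, 0.3, 0.4]; weights[0, 4:8] == [1, 1, 1, 1]
    return targets, weights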
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
wagnerpeer/gitexplorer | gitexplorer/__main__.py | 1 | 2626 | '''
Created on 24.08.2017
@author: Peer
'''
import argparse
import matplotlib.pyplot as plt
import networkx as nx
from gitexplorer import queries, git_log_processing
from gitexplorer.basics import GitExplorerBase
def _get_arguments():
parser = argparse.ArgumentParser(description='Parse configuration parameters for gitexplorer from command line arguments.')
parser.add_argument('directory',
metavar='DIR',
type=str,
help='The repository to run gitexplorer in.')
return parser.parse_args()
def main(directory):
log_reader = git_log_processing.GitLogReader(directory)
log = log_reader.get_log_information()
gitexplorer_database = GitExplorerBase.get_gitexplorer_database()
gitexplorer_database.commit_collection.drop()
gitexplorer_database.commit_collection.insert_many(log)
queries.AggregatorRegistry.load('gitexplorer.queries.authors_per_file',
'gitexplorer.queries.commits_by_datetime',
'gitexplorer.queries.commits_by_filestats',
'gitexplorer.queries.commits_per_author',
'gitexplorer.queries.queries_per_commit')
aggregations = list(map(queries.AggregatorRegistry.get,
['authors_per_file_path',
'commits_by_day_of_week',
'commits_by_hour_of_day',
'additions_deletions_lines_commits_by_file_path',
'commits_per_author',
'additions_deletions_lines_modifications_per_commit',
'average_additions_deletions_lines_modifications_per_commit',
'additions_deletions_lines_modifications_commits_by_date',
'average_additions_deletions_lines_modifications_commits_by_date',
]))
dependencies = nx.DiGraph()
for aggregation in aggregations:
provides = aggregation.provides()
dependencies.add_edge(provides, aggregation.requires())
sorted_dependencies = nx.topological_sort(dependencies, reverse=True)
print(sorted_dependencies)
for dependency in sorted_dependencies:
for aggregation in aggregations:
if(aggregation.name == dependency):
aggregation().run()
nx.draw(dependencies, with_labels=True)
plt.show()
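# Illustrative sketch (not part of the original module): the ordering idea used in
# main() on a tiny hypothetical graph.  Edges point from what an aggregation
# provides to what it requires, so the reverse topological order lists
# prerequisites before the aggregations that depend on them.  The node names are
# made up, and the sketch assumes the same networkx 1.x API
# (topological_sort with reverse=True) that main() itself uses.
def _example_dependency_order():
    graph = nx.DiGraph()
    graph.add_edge('commits_per_author', 'commit_collection')
    graph.add_edge('average_additions_per_commit', 'commits_per_author')
    # -> ['commit_collection', 'commits_per_author', 'average_additions_per_commit']
    return nx.topological_sort(graph, reverse=True)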
if(__name__ == '__main__'):
args = _get_arguments()
main(args.directory)
| mit |
GiulioRossetti/ndlib | ndlib/viz/mpl/ComparisonViz.py | 1 | 3783 | import abc
from bokeh.palettes import Category20_9 as cols
import os
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
import future.utils
import past
import six
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "[email protected]"
class InitializationException(Exception):
"""Initialization Exception"""
@six.add_metaclass(abc.ABCMeta)
class ComparisonPlot(object):
# __metaclass__ = abc.ABCMeta
def __init__(self, models, trends, statuses=["Infected"]):
self.models = models
self.trends = trends
if len(models) != len(trends):
raise InitializationException({"message": "The number of models does not match the number of trends"})
sts = [model.available_statuses for model in models]
self.mnames = ["%s_%s" % (models[i].name, i) for i in past.builtins.xrange(0, len(models))]
self.srev = {}
i = 0
available_classes = {}
for model in models:
srev = {v: k for k, v in future.utils.iteritems(sts[i])}
self.nnodes = model.graph.number_of_nodes()
for cl in srev.values():
available_classes[cl] = None
self.srev["%s_%s" % (model.name, i)] = srev
i += 1
if type(statuses) == list:
cls = set(statuses) & set(available_classes.keys())
else:
cls = set([statuses]) & set(available_classes.keys())
if len(cls) > 0:
self.classes = cls
else:
raise InitializationException({"message": "Statuses specified not available for the model (or missing)"})
self.ylabel = ""
self.title = ""
self.normalized = True
@abc.abstractmethod
def iteration_series(self, percentile):
"""
Prepare the data to be visualized
:param percentile: The percentile for the trend variance area
:return: a dictionary where iteration ids are keys and the associated values are the computed measures.
"""
pass
def plot(self, filename=None, percentile=90):
"""
Plot the comparison on file.
:param filename: the output filename
:param percentile: The percentile for the trend variance area. Default 90.
"""
pres = self.iteration_series(percentile)
mx = 0
i, h = 0, 0
for k, l in future.utils.iteritems(pres):
j = 0
for st in l:
mx = len(l[st][0])
if self.normalized:
plt.plot(range(0, mx), l[st][1]/self.nnodes, lw=2,
label="%s - %s" % (k.split("_")[0], st), alpha=0.9, color=cols[h+j])
plt.fill_between(range(0, mx), l[st][0]/self.nnodes,
l[st][2]/self.nnodes, alpha=0.2, color=cols[h+j])
else:
plt.plot(range(0, mx), l[st][1], lw=2,
label="%s - %s" % (k.split("_")[0], st), alpha=0.9, color=cols[h + j])
plt.fill_between(range(0, mx), l[st][0],
l[st][2], alpha=0.2, color=cols[h + j])
j += 1
i += 1
h += 2
plt.grid(axis="y")
plt.xlabel("Iterations", fontsize=24)
plt.ylabel(self.ylabel, fontsize=24)
plt.legend(loc="best", fontsize=18)
plt.xlim((0, mx))
if self.normalized:
plt.ylim((0, 1))
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
plt.clf()
else:
plt.show()
| bsd-2-clause |
tobiasgehring/qudi | logic/spectrum.py | 1 | 7721 | # -*- coding: utf-8 -*-
"""
This file contains the Qudi logic class that captures and processes fluorescence spectra.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from core.util.mutex import Mutex
from core.util.network import netobtain
from logic.generic_logic import GenericLogic
class SpectrumLogic(GenericLogic):
"""This logic module gathers data from the spectrometer.
"""
sig_specdata_updated = QtCore.Signal()
sig_next_diff_loop = QtCore.Signal()
_modclass = 'spectrumlogic'
_modtype = 'logic'
# declare connectors
_connectors = {
'spectrometer': 'SpectrometerInterface',
'odmrlogic1': 'ODMRLogic',
'savelogic': 'SaveLogic'
}
def __init__(self, **kwargs):
""" Create SpectrometerLogic object with connectors.
@param dict kwargs: optional parameters
"""
super().__init__(**kwargs)
# locking for thread safety
self.threadlock = Mutex()
def on_activate(self):
""" Initialisation performed during activation of the module.
"""
self.spectrum_data = np.array([])
self.diff_spec_data_mod_on = np.array([])
self.diff_spec_data_mod_off = np.array([])
self.repetition_count = 0 # count loops for differential spectrum
self._spectrometer_device = self.get_connector('spectrometer')
self._odmr_logic = self.get_connector('odmrlogic1')
self._save_logic = self.get_connector('savelogic')
self.sig_next_diff_loop.connect(self._loop_differential_spectrum)
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
"""
if self.getState() != 'idle' and self.getState() != 'deactivated':
pass
def get_single_spectrum(self):
""" Record a single spectrum from the spectrometer.
"""
self.spectrum_data = netobtain(self._spectrometer_device.recordSpectrum())
# Clearing the differential spectra data arrays so that they do not get
# saved with this single spectrum.
self.diff_spec_data_mod_on = np.array([])
self.diff_spec_data_mod_off = np.array([])
self.sig_specdata_updated.emit()
def save_raw_spectrometer_file(self, path='', postfix=''):
"""Ask the hardware device to save its own raw file.
"""
# TODO: sanity check the passed parameters.
self._spectrometer_device.saveSpectrum(path, postfix=postfix)
def start_differential_spectrum(self):
"""Start a differential spectrum acquisition. An initial spectrum is recorded to initialise the data arrays to the right size.
"""
self._continue_differential = True
# Taking a demo spectrum gives us the wavelength values and the length of the spectrum data.
demo_data = netobtain(self._spectrometer_device.recordSpectrum())
wavelengths = demo_data[0, :]
empty_signal = np.zeros(len(wavelengths))
# Using this information to initialise the differential spectrum data arrays.
self.spectrum_data = np.array([wavelengths, empty_signal])
self.diff_spec_data_mod_on = np.array([wavelengths, empty_signal])
self.diff_spec_data_mod_off = np.array([wavelengths, empty_signal])
self.repetition_count = 0
# Starting the measurement loop
self._loop_differential_spectrum()
def resume_differential_spectrum(self):
"""Resume a differential spectrum acquisition.
"""
self._continue_differential = True
# Starting the measurement loop
self._loop_differential_spectrum()
def _loop_differential_spectrum(self):
""" This loop toggles the modulation and iteratively records a differential spectrum.
"""
# If the loop should not continue, then return immediately without
# emitting any signal to repeat.
if not self._continue_differential:
return
# Otherwise, we make a measurement and then emit a signal to repeat this loop.
# Toggle on, take spectrum and add data to the mod_on data
self.toggle_modulation(on=True)
these_data = netobtain(self._spectrometer_device.recordSpectrum())
self.diff_spec_data_mod_on[1, :] += these_data[1, :]
# Toggle off, take spectrum and add data to the mod_off data
self.toggle_modulation(on=False)
these_data = netobtain(self._spectrometer_device.recordSpectrum())
self.diff_spec_data_mod_off[1, :] += these_data[1, :]
self.repetition_count += 1 # increment the loop count
# Calculate the differential spectrum
self.spectrum_data[1, :] = self.diff_spec_data_mod_on[
1, :] - self.diff_spec_data_mod_off[1, :]
self.sig_specdata_updated.emit()
self.sig_next_diff_loop.emit()
def stop_differential_spectrum(self):
"""Stop an ongoing differential spectrum acquisition
"""
self._continue_differential = False
def toggle_modulation(self, on):
""" Toggle the modulation.
"""
        if not isinstance(on, bool):
            self.log.warning("Parameter 'on' needs to be boolean")
        if on:
            self._odmr_logic.MW_on()
        else:
            self._odmr_logic.MW_off()
def save_spectrum_data(self):
""" Saves the current spectrum data to a file.
"""
filepath = self._save_logic.get_path_for_module(module_name='spectra')
filelabel = 'spectrum'
# write experimental parameters
parameters = OrderedDict()
parameters['Spectrometer acquisition repetitions'] = self.repetition_count
# prepare the data in an OrderedDict:
data = OrderedDict()
data['wavelength'] = self.spectrum_data[0, :]
# If the differential spectra arrays are not empty, save them as raw data
if len(self.diff_spec_data_mod_on) != 0 and len(self.diff_spec_data_mod_off) != 0:
data['signal_mod_on'] = self.diff_spec_data_mod_on[1, :]
data['signal_mod_off'] = self.diff_spec_data_mod_off[1, :]
data['differential'] = self.spectrum_data[1, :]
else:
data['signal'] = self.spectrum_data[1, :]
# Prepare the figure to save as a "data thumbnail"
plt.style.use(self._save_logic.mpl_qd_style)
fig, ax1 = plt.subplots()
        # Differential runs store 'differential' instead of a plain 'signal' column,
        # so pick whichever is present to avoid a KeyError.
        signal_key = 'signal' if 'signal' in data else 'differential'
        ax1.plot(data['wavelength'], data[signal_key])
ax1.set_xlabel('Wavelength (nm)')
ax1.set_ylabel('Signal (arb. u.)')
fig.tight_layout()
# Save to file
self._save_logic.save_data(data,
filepath=filepath,
parameters=parameters,
filelabel=filelabel,
plotfig=fig)
self.log.debug('Spectrum saved to:\n{0}'.format(filepath))
| gpl-3.0 |
jcmt/WRFTools | wrftools/WRFTools.py | 1 | 23195 | import netCDF4 as n4
import numpy as np
class get:
def __init__(self, path):
self.path = path
self.nc = n4.Dataset(path)
def close(self):
self.nc.close()
def variables(self):
'''
Prints the variables in the wrfout file.
'''
varname = np.asarray([v for v in self.nc.variables])
return varname
def dim(self):
'''
Returns an array containing the domain dimensios
time, levels, latitude, longitude
'''
nt = len(self.nc.dimensions['Time'])
nx = len(self.nc.dimensions['west_east'])
ny = len(self.nc.dimensions['south_north'])
nz = len(self.nc.dimensions['bottom_top'])
return np.array([nt, nz, ny, nx])
def lat(self):
'''
Returns the latitude array
'''
LAT = self.nc.variables['XLAT'][0, ]
return LAT
def lon(self):
'''
Returns the longitude array
'''
LON = self.nc.variables['XLONG'][0, ]
return LON
def height(self, tstep=None, nlev=':', ny=':', nx=':'):
'''
Returns the height of the model levels at a given time
usage:
height(tstep)
tstep is the time instant, if not specified all the written times
will be used
'''
        if tstep is None:
Z = self.getvar('PH', tstep=':', nlev=nlev, ny=ny, nx=nx) / 9.81
else:
Z = self.getvar('PH', tstep=tstep, nlev=nlev, ny=ny, nx=nx) / 9.81
return Z
def time(self, tstep=None):
'''
Returns a datetime
'''
tstart = self.nc.SIMULATION_START_DATE
        if tstep is None:
t = self.nc.variables['XTIME'][...]
else:
t = self.nc.variables['XTIME'][tstep]
TIME = n4.num2date(t, units='minutes since ' + tstart, calendar='gregorian')
return TIME
def getvar(self, var, tstep=':', nlev=':', ny=':', nx=':'):
'''
Returns the data from a given variable in the wrfout file
usage:
getvar(var, tstep)
var is a string with the variable name (example 'U10')
tstep is the time instant, if not specified all the written times will
be used
Warning:
For the variable P, PH and T, their base state will be added
'''
if len(self.nc.variables[var].dimensions) == 4:
SLICE = str(tstep) + ',' + str(nlev) + ',' + str(ny) + ',' + str(nx)
elif len(self.nc.variables[var].dimensions) == 3:
SLICE = str(tstep) + ',' + str(ny) + ',' + str(nx)
elif len(self.nc.variables[var].dimensions) == 2:
SLICE = str(ny) + ',' + str(nx)
VAR = eval("self.nc.variables['" + var + "'][" + SLICE + "]")
if (var == 'P') | (var == 'PH'):
VAR += eval("self.nc.variables['" + var + "B'][" + SLICE + "]")
if (var == 'T'):
VAR += 300
return VAR
def SLP(self, tstep=0):
'''
Calculation of Sea-level pressure.
usage:
WRF_SLP = SLP(tstep)
tstep is the time instant, if not specified the first written time
will be used
From NCL fortran source code wrf_user.f
'''
PR = self.getvar('P', tstep=tstep)
TH = self.getvar('T', tstep=tstep)
QVAPOR = self.getvar('QVAPOR', tstep=tstep)
ELEVATION = self.height(tstep=tstep)
#constants:
R=287.04
G=9.81
GAMMA=0.0065
TC=273.16+17.05
PCONST=10000
c = 2.0/7.0
#calculate TK:
TK = TH*np.power(PR*.00001,c)
#Find least z that is PCONST Pa above the surface
#Sweep array from bottom to top
s = np.shape(PR) #size of the input array
ss = [s[1],s[2]] # shape of 2d arrays
WRF_SLP = np.empty(ss,np.float32)
LEVEL = np.empty(ss,np.int32)
# Ridiculous MM5 test:
RIDTEST = np.empty(ss,np.int32)
PLO = np.empty(ss, np.float32)
ZLO = np.empty(ss,np.float32)
TLO = np.empty(ss,np.float32)
PHI = np.empty(ss,np.float32)
ZHI = np.empty(ss,np.float32)
THI = np.empty(ss,np.float32)
LEVEL[:,:] = -1
for K in range(s[0]):
KHI = np.minimum(K+1, s[0]-1)
LEVNEED = np.logical_and(np.less(LEVEL,0), np.less(PR[K,:,:] , PR[0,:,:] - PCONST))
LEVEL[LEVNEED]=K
PLO=np.where(LEVNEED,PR[K,:,:],PLO[:,:])
TLO=np.where(LEVNEED,TK[K,:,:]*(1.+0.608*QVAPOR[K,:,:]), TLO[:,:])
ZLO=np.where(LEVNEED,ELEVATION[K,:,:],ZLO[:,:])
PHI=np.where(LEVNEED,PR[KHI,:,:],PHI[:,:])
THI=np.where(LEVNEED,TK[KHI,:,:]*(1.+0.608*QVAPOR[KHI,:,:]), THI[:,:])
ZHI=np.where(LEVNEED,ELEVATION[KHI,:,:],ZHI[:,:])
P_AT_PCONST = PR[0,:,:]-PCONST
T_AT_PCONST = THI - (THI-TLO)*np.log(P_AT_PCONST/PHI)*np.log(PLO/PHI)
Z_AT_PCONST = ZHI - (ZHI-ZLO)*np.log(P_AT_PCONST/PHI)*np.log(PLO/PHI)
T_SURF = T_AT_PCONST*np.power((PR[0,:,:]/P_AT_PCONST),(GAMMA*R/G))
T_SEA_LEVEL = T_AT_PCONST + GAMMA*Z_AT_PCONST
RIDTEST = np.logical_and(T_SURF <= TC, T_SEA_LEVEL >= TC)
T_SEA_LEVEL = np.where(RIDTEST, TC, TC - .005*(T_SURF -TC)**2)
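        # Hypsometric extrapolation of the lowest half-level pressure down to sea level,
        # using the mean of the surface and sea-level temperatures as the layer temperature.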
Z_HALF_LOWEST=ELEVATION[0,:,:]
WRF_SLP = 0.01*(PR[0,:,:]*np.exp(2.*G*Z_HALF_LOWEST/(R*(T_SEA_LEVEL+T_SURF))))
return WRF_SLP
def ETH(self, tstep=0):
'''
Program to calculate equivalent potential temperature.
usage:
WRF_ETH = ETH(tsetp)
tstep is the time instant, if not specified the first written time
will be used
From NCL/Fortran source code DEQTHECALC in eqthecalc.f
'''
PRESS = self.getvar('P', tstep=tstep)
TH = self.getvar('T', tstep=tstep)
QVAPOR = self.getvar('QVAPOR', tstep=tstep)
c = 2.0/7.0
EPS = 0.622
GAMMA = 287.04/1004.0
GAMMAMD = 0.608 -0.887
TLCLC1 = 2840.0
TLCLC2 = 3.5
TLCLC3 = 4.805
TLCLC4 = 55.0
THTECON1 = 3376.0
THTECON2 = 2.54
THTECON3 = 0.81
#calculate Temp. in Kelvin
PRESS *= 0.01
TK = TH*np.power(PRESS*.001, c)
Q = np.maximum(QVAPOR, 1.e-15)
E = Q*PRESS/(EPS+Q)
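        # TLCL: temperature at the lifting condensation level (Bolton 1980-style formula);
        # the exponential term below then converts potential temperature to theta-e.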
TLCL = TLCLC4+ TLCLC1/(np.log(np.power(TK, TLCLC2)/E)-TLCLC3)
EXPNT = (THTECON1/TLCL - THTECON2)*Q*(1.0+THTECON3*Q)
WRF_ETH = TK*np.power(1000.0/PRESS, GAMMA*(1.0+GAMMAMD*Q))*np.exp(EXPNT)
return WRF_ETH
def RH(self, tstep=0):
'''
Calculation of relative humidity.
usage:
WRF_RH = RH(tstep)
tstep is the time instant, if not specified the first written time
will be used
From NCL formula in wrf_user.f
'''
PRESS = self.getvar('P', tstep=tstep)
TH = self.getvar('T', tstep=tstep)
QVAPOR = self.getvar('QVAPOR', tstep=tstep)
c = 2.0/7.0
SVP1 = 0.6112
SVP2 = 17.67
SVPT0 = 273.15
SVP3 = 29.65
EP_3 = 0.622
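        # Saturation vapour pressure ES from a Magnus-type formula (in hPa), then the
        # saturation mixing ratio QVS; RH is the ratio QVAPOR/QVS clipped to [0, 100] %.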
TK = TH * np.power(PRESS * .00001, c)
ES = 10 * SVP1 * np.exp(SVP2 * (TK-SVPT0) / (TK-SVP3))
QVS = EP_3 * ES / (0.01*PRESS - (1.-EP_3) * ES)
WRF_RH = 100.0 * np.maximum(np.minimum(QVAPOR/QVS, 1.0), 0)
return WRF_RH
def SHEAR(self, tstep=0, level1=200., level2=850., leveltype='pressure'):
'''
Program calculates horizontal wind shear
usage:
SHR = SHEAR(tstep, level1, level2)
tstep is the time instant, if not specified the first written time
will be used
        level1 is the top level to consider (default 200 hPa)
        level2 is the bottom level to consider (default 850 hPa)
        leveltype is either 'pressure' (interpolate to pressure levels) or 'eta' (use model level indices)
From NCAR VAPOR python utils
'''
if leveltype == 'pressure':
print(leveltype)
PR = self.getvar('P', tstep=tstep)
U = self.getvar('U', tstep=tstep)
V = self.getvar('V', tstep=tstep)
PR *= 0.01
uinterp1 = interp3d(U, PR, level1)
uinterp2 = interp3d(U, PR, level2)
vinterp1 = interp3d(V, PR, level1)
vinterp2 = interp3d(V, PR, level2)
result = (uinterp1-uinterp2)*(uinterp1-uinterp2)+(vinterp1-vinterp2)*(vinterp1-vinterp2)
result = np.sqrt(result)
elif leveltype == 'eta':
print(leveltype)
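            # On eta levels the staggered wind components are only trimmed to the
            # mass-grid shape; no horizontal destaggering or vertical interpolation is done.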
uinterp1 = self.getvar('U', tstep=tstep, nlev=level1, nx=':-1')
uinterp2 = self.getvar('U', tstep=tstep, nlev=level2, nx=':-1')
vinterp1 = self.getvar('V', tstep=tstep, nlev=level1, ny=':-1')
vinterp2 = self.getvar('V', tstep=tstep, nlev=level2, ny=':-1')
result = (uinterp1-uinterp2)*(uinterp1-uinterp2)+(vinterp1-vinterp2)*(vinterp1-vinterp2)
result = np.sqrt(result)
return result
def TD(self, tstep=0):
'''
Calculation of dewpoint temperature based on WRF variables.
usage:
WRFTD = TD(tstep)
tstep is the time instant, if not specified the first written time
will be used
'''
        #Let PR = 0.01*(P+PB) (pressure in hPa)
#and QV = MAX(QVAPOR,0)
#Where TDC = QV*PR/(0.622+QV)
# TDC = MAX(TDC,0.001)
#Formula is (243.5*log(TDC) - 440.8)/(19.48-log(TDC))
P = self.getvar('P', tstep=tstep)
QVAPOR = self.getvar('QVAPOR', tstep=tstep)
QV = np.maximum(QVAPOR, 0.0)
TDC = 0.01 * QV * P / (0.622 + QV)
TDC = np.maximum(TDC, 0.001)
WRF_TD = (243.5 * np.log(TDC) - 440.8)/(19.48 - np.log(TDC))
return WRF_TD
def TK(self, tstep=0):
'''
Calculation of temperature in degrees kelvin using WRF variables.
usage:
TMP = TK(tstep)
tstep is the time instant, if not specified the first written time
will be used
'''
#Formula is (T+300)*((P+PB)*10**(-5))**c,
#Where c is 287/(7*287*.5) = 2/7
P = self.getvar('P', tstep=tstep)
TH = self.getvar('T', tstep=tstep)
c = 2.0/7.0
WRF_TK = TH * np.power(P * .00001, c)
return WRF_TK
def BRUNT(self, tstep=0):
'''
Calculation of Brunt-Vaisala frequency.
usage:
BV = BRUNT(tstep)
tstep is the time instant, if not specified the first written time
will be used
'''
THETA = self.getvar('T', tstep=tstep) * (1 + 0.61 * self.getvar('QVAPOR', tstep=tstep))
Z = self.height(tstep=tstep)
nz = self.dim()[1]
g = 9.81
BRUNT = np.zeros(shape=self.dim()[1:])
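        # Forward difference of virtual potential temperature with height on model levels.
        # Note: this returns N**2 (the squared Brunt-Vaisala frequency); no square root is
        # taken, and the top level is left at zero.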
for i in range(nz-1):
BRUNT[i, :, :] = (g/THETA[i, :, :]) * ((THETA[i+1, :, :] - THETA[i, :, :]) / (Z[i+1, :, :] - Z[i, :, :]))
return BRUNT
def RI(self, tstep=0):
'''
Calculation of Richardson Number.
usage:
ri = RI(tstep)
tstep is the time instant, if not specified the first written time
will be used
'''
THETA = self.getvar('T', tstep=tstep) * (1 + 0.61 * self.getvar('QVAPOR', tstep=tstep))
Z = self.height(tstep=tstep)
U = self.getvar('U', tstep=tstep, nx=':-1')
V = self.getvar('V', tstep=tstep, ny=':-1')
nz = self.dim()[1]
g = 9.81
Td = 9.8 / 1000.# The dry adiabatic lapse rate 9.8 K/km
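        # Layer-wise bulk Richardson number between adjacent model levels: buoyancy term
        # (including the dry adiabatic lapse rate correction) over the squared vertical
        # wind shear; the top level is left at zero.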
RI = np.zeros(shape=self.dim()[1:])
for i in range(nz-1):
RI[i, :, :] = (g*((THETA[i+1, :, :] - THETA[i, :, :]) + Td * (Z[i+1, :, :] - Z[i, :, :])) * (Z[i+1, :, :] - Z[i, :, :])) / \
(THETA[i, :, :] * ((U[i+1, :, :] - U[i, :, :])**2 + (V[i+1, :, :] - V[i, :, :])**2))
return RI
def pcolor(self, VAR, tstep=None, colorbar=True, level=0, pcolor=False, norm=None, coastcolor='k', **kargs):
'''
lat-lon plot on a base map
usage:
pcolor(VAR, colormap, colorbar, tstep, level, shading, norm)
VAR is a wrfout variable (string) or a 2D numpy array
            if VAR is a string, a tstep and level must be given to acquire the
            variable. If not, the first level and time will be used
shading can be one of: flat (default), interp (contourf) or None
(pcolor)
'''
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import ticks
        if tstep is None:
return "A time step must be specified..."
else:
if isinstance(VAR, str):
if len(self.nc.variables[VAR].dimensions) == 4:
VAR = self.getvar(VAR, tstep=tstep, nlev=level, ny=':', nx=':')
elif len(self.nc.variables[VAR].dimensions) == 3:
VAR = self.getvar(VAR, tstep=tstep)
elif len(self.nc.variables[VAR].dimensions) == 2:
VAR = self.getvar(VAR)
if self.nc.MAP_PROJ == 1:
proj = 'lcc'
elif self.nc.MAP_PROJ == 3:
proj = 'merc'
else:
return('Projection not suported')
lat_1 = self.nc.TRUELAT1
lat_2 = self.nc.TRUELAT2
lon_0 = self.nc.CEN_LON
lat_0 = self.nc.CEN_LAT
llcrnrlat = self.lat().min()
urcrnrlat = self.lat().max()
llcrnrlon = self.lon().min()
urcrnrlon = self.lon().max()
res = 'i'
if self.nc.DX < 25000:
res = 'h'
plt.figure()
ax = plt.axes()
m = Basemap(projection=proj, llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat, \
llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, lat_1=lat_1, \
lat_2=lat_2, lat_0=lat_0, lon_0=lon_0, resolution=res, area_thresh=10000)
m.drawcoastlines(color=coastcolor, linewidth=2)
m.drawcountries(color=coastcolor, linewidth=1.5)
parallels = ticks.loose_label(self.lat().min(),self.lat().max())
m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=14)
meridians = ticks.loose_label(self.lon().min(),self.lon().max())
m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=14)
x, y = m(self.lon(), self.lat())
if not pcolor:
if not norm:
levels = np.linspace(VAR.min(), VAR.max(), 200)
else:
levels = np.linspace(norm.min(), norm.max(), 200)
cs = ax.contourf(x, y, VAR, levels=levels, **kargs)
else:
cs = ax.pcolormesh(x, y, VAR, **kargs)
ax.set_title(self.time(tstep=tstep))
if colorbar:
fmt = plt.matplotlib.ticker.FormatStrFormatter("%.1f")
if not norm:
clev = np.linspace(np.round(VAR.min()), np.round(VAR.max()), 10, endpoint=True)
else:
clev = np.linspace(np.round(norm.min()), np.round(norm.max()), 10, endpoint=True)
cbar = m.colorbar(cs, location='right', ticks=clev, format=fmt, pad='5%')
cbar.ax.tick_params(labelsize=12)
return ax, m
def CrossPcolor(self, VAR, tstep=1, latitude=None, longitude=None, colorbar=True, \
norm=None, ymax=20000, ymin=0, pcolor=True, lev=None, **kargs):
        '''
        Vertical cross section plot
        usage:
            CrossPcolor(VAR, latitude, longitude, colormap, colorbar, tstep, level, shading, norm,
                        lev)
            VAR is a wrfout variable (string) or a 2D numpy array
            if VAR is a string, a tstep and level must be given to acquire the
            variable. If not, the first level and time will be used
            shading can be one of: flat (default), interp (contourf) or None
            (pcolor)
        '''
        import matplotlib.pyplot as plt
plt.figure()
ax = plt.axes(axisbg='grey')
        if latitude is None and longitude is None:
return('A latitude and longitude range must be chosen...')
        elif latitude is None:
pos_lon = np.argmin(abs(self.lon()[1, :] - longitude))
pos_lat = slice(0, np.size(self.lat(), axis=0))
y = self.height(tstep=tstep, nlev=':', ny=pos_lat, nx=pos_lon)
x = np.tile(self.lat()[:, pos_lon], (self.dim()[1], 1))
xlabel = 'Latitude ($^\circ$)'
        elif longitude is None:
pos_lon = slice(0, np.size(self.lon(), axis=1))
pos_lat = np.argmin(abs(self.lat()[:, 1] - latitude))
y = self.height(tstep=tstep, ny=pos_lat, nx=pos_lon)
x = np.tile(self.lon()[pos_lat, :], (self.dim()[1], 1))
xlabel = 'Longitude ($^\circ$)'
else:
            return("I can't deal with this.. yet!!!")
if isinstance(VAR, str):
if len(self.nc.variables[VAR].dimensions) == 4:
VAR = self.getvar(VAR, tstep=tstep, nlev=':', ny=pos_lat, nx=pos_lon)
elif len(self.nc.variables[VAR].dimensions) == 3:
VAR = self.getvar(VAR, tstep=tstep, ny=pos_lat, nx=pos_lon)
elif len(self.nc.variables[VAR].dimensions) == 2:
VAR = self.getvar(VAR, ny=pos_lat, nx=pos_lon)
else:
VAR = np.squeeze(VAR[:, pos_lat, pos_lon])
if not pcolor:
if not lev:
levels = np.linspace(VAR.min(), VAR.max(), 200)
else:
levels = np.linspace(lev[0], lev[1], 100)
cs = ax.contourf(x[0:VAR.shape[0], :], y[0:VAR.shape[0], :], VAR, norm=norm, levels=levels, **kargs)
else:
cs = ax.pcolormesh(x[0:VAR.shape[0], :], y[0:VAR.shape[0], :], VAR, norm=norm, **kargs)
if colorbar:
fmt = plt.matplotlib.ticker.FormatStrFormatter("%.1f")
if not lev:
clev = np.linspace(np.round(VAR.min()), np.round(VAR.max()), 10)
else:
clev = np.linspace(lev[0], lev[1], 10)
cbar = plt.colorbar(cs, ticks=clev, format=fmt, norm=norm)
ax.set_title(self.time()[tstep])
ax.set_xlabel(xlabel)
ax.set_ylabel('Height (m)')
ax.set_ylim([ymin, ymax])
ax.set_xlim(x.min(), x.max())
return ax
def stloc(self, latitude, longitude):
'''
Returns nearst the grid points to location
usage:
nlat, nlon = stloc(latitude, longitude)
'''
pos_lat = np.argmin(abs(self.lat()[:, 1] - latitude))
pos_lon = np.argmin(abs(self.lon()[1, :] - longitude))
return pos_lat, pos_lon
def interpvar(self, VAR, ilon, ilat, tstep=None, nlev=None):
from matplotlib import tri
'''
Interpolates variable to latitude longitude (2D)
usage:
IVAR = interpvar(VAR, longitude, latitude, tstep, nlev)
'''
intp = tri.delaunay.Triangulation(self.lat().flatten(), self.lon().flatten())
if isinstance(VAR, str):
            if (nlev is None) and (tstep is not None):
                data = self.getvar(VAR, tstep=tstep)[0, ]
            elif (nlev is not None) and (tstep is None):
                data = self.getvar(VAR, nlev=nlev)
            else:
                raise ValueError('Give either tstep or nlev when VAR is a variable name')
else:
data = VAR
IVAR = np.zeros(shape=(data.shape[0], ilat.size, ilon.size))
for i in range(data.shape[0]):
IVAR[i, ] = intp.nn_interpolator(data[i, ].flatten())(ilat, ilon)
return np.squeeze(IVAR)
def sounding(self, tstep=0, lat=38.28, lon=-28.24):
import SkewT
import thermodynamics
'''
Creates a virtual sounding
'''
ny, nx = self.stloc(lat, lon)
data={}
data['pres'] = self.getvar('P', tstep=tstep, nlev=':', ny=ny, nx=nx) * 1e-2
data['MIXR'] = self.getvar('QVAPOR', tstep=tstep, nlev=':', ny=ny, nx=nx) * 1000.
data['temp'] = thermodynamics.TempK(self.getvar('T', tstep=tstep, nlev=':', ny=ny, nx=nx), data['pres'] * 1e2) - 273.15
data['dwpt'] = thermodynamics.MixR2VaporPress(data['MIXR']/1000. ,data['pres'])
data['sknt'] = self.WSPEED(tstep=tstep, nlev=':', ny=ny, nx=nx) * 1.94384449
data['drct'] = self.WDIR(tstep=tstep, nlev=':', ny=ny, nx=nx)
data['hght'] = self.getvar('HGT', tstep=tstep, nlev=':', ny=ny, nx=nx)
data['RELH'] = self.RH(tstep=tstep)[:, ny, nx]
data['StationNumber'] = 'Location = ' + str(lat) + ' N ' + str(lon) + ' E'
data['SoundingDate'] = self.time(tstep=tstep).strftime('%Y/%m/%d %H:%M')
data['THTV'] = thermodynamics.ThetaV(data['temp']+273.15, data['pres']*1e2, thermodynamics.VaporPressure(data['dwpt']))
# data['THTA'] =
# data['THTE'] =
return SkewT.Sounding(data=data)
def WSPEED(self, tstep=0, nlev=0, ny=':', nx=':'):
        # U is staggered in x (west_east_stag) and V in y (south_north_stag); when a full
        # ':' slice is requested, trim the staggered dimension so u and v share the same shape.
        if nx == ':':
            u = self.getvar('U', tstep=tstep, nlev=nlev, ny=ny, nx=':-1')
        else:
            u = self.getvar('U', tstep=tstep, nlev=nlev, ny=ny, nx=nx)
        if ny == ':':
            v = self.getvar('V', tstep=tstep, nlev=nlev, ny=':-1', nx=nx)
        else:
            v = self.getvar('V', tstep=tstep, nlev=nlev, ny=ny, nx=nx)
return np.sqrt(u*u + v*v)
def WDIR(self, tstep=0, nlev=0, ny=':', nx=':'):
        # Same staggering handling as in WSPEED so u, v and wspeed have matching shapes.
        if nx == ':':
            u = self.getvar('U', tstep=tstep, nlev=nlev, ny=ny, nx=':-1')
        else:
            u = self.getvar('U', tstep=tstep, nlev=nlev, ny=ny, nx=nx)
        if ny == ':':
            v = self.getvar('V', tstep=tstep, nlev=nlev, ny=':-1', nx=nx)
        else:
            v = self.getvar('V', tstep=tstep, nlev=nlev, ny=ny, nx=nx)
wspeed = self.WSPEED(tstep=tstep, nlev=nlev, ny=ny, nx=nx)
dir_rad = np.arctan2(u/wspeed, v/wspeed)
dir_trig = (dir_rad * 180/np.pi) + 180
dir_cardinal = 90 - dir_trig
return dir_cardinal
def myround(x, base=5):
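    # Round x to the nearest multiple of base/100 (e.g. base=5 rounds to the nearest 0.05).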
x *= 100
y = int(base * round(float(x)/base))
y /= 100.0
return y
def interp3d(A, PR, val):
s = np.shape(PR) #size of the input arrays
ss = [s[1], s[2]] #shape of 2d arrays
interpVal = np.empty(ss, np.float32)
ratio = np.zeros(ss, np.float32)
    # LEVEL records the lowest model level where PR <= val
LEVEL = np.empty(ss, np.int32)
LEVEL[:, :] = -1 #value where PR<=val has not been found
for K in range(s[0]):
#LEVNEED is true if this is first time PR<val.
LEVNEED = np.logical_and(np.less(LEVEL, 0), np.less(PR[K, :, :], val))
LEVEL[LEVNEED] = K
ratio[LEVNEED] = (val-PR[K, LEVNEED]) / (PR[K-1, LEVNEED] - PR[K, LEVNEED])
interpVal[LEVNEED] = ratio[LEVNEED] * A[K, LEVNEED] + (1-ratio[LEVNEED]) * A[K-1, LEVNEED]
LEVNEED = np.greater(LEVEL, 0)
# Set unspecified values to value of A at top of data:
LEVNEED = np.less(LEVEL, 0)
interpVal[LEVNEED] = A[s[0]-1, LEVNEED]
return interpVal
def interp_delaunay(x0,y0,v0,x1, y1):
from matplotlib import tri
intp = tri.delaunay.Triangulation(x0.flatten(), y0.flatten())
v1 = intp.nn_interpolator(v0.flatten())(x1, y1)
return v1
def var_border(v,di=1,dj=1):
'''
Border of 2d numpy array
di,dj is the interval between points along columns and lines
Corner points are kept even with di and dj not 1
'''
j,i=v.shape
if (di,dj)==(1,1):
xb=np.arange(2*i+2*j,dtype=v.dtype)
yb=np.arange(2*i+2*j,dtype=v.dtype)
xb[0:j] = v[:,0]
xb[j:j+i] = v[-1,:]
xb[j+i:j+i+j] = np.flipud(v[:,-1])
xb[j+i+j:] = np.flipud(v[0,:])
else:
# ensure corner points are kept!!
tmp1 = v[::dj,0]
tmp2 = v[-1,::di]
tmp3 = np.flipud(v[:,-1])[::dj]
tmp4 = np.flipud(v[0,:])[::di]
xb=np.concatenate((tmp1,tmp2,tmp3,tmp4))
return xb
def plot_domains(dlist):
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import ticks
wrf = {}
for i in range(1, len(dlist)+1):
wrf['d%02d'%i] = get(dlist[i-1])
if wrf['d01'].nc.MAP_PROJ == 1:
proj = 'lcc'
elif wrf['d01'].nc.MAP_PROJ == 3:
proj = 'merc'
else:
return('Projection not suported')
lat_1 = wrf['d01'].nc.TRUELAT1
lat_2 = wrf['d01'].nc.TRUELAT2
lon_0 = wrf['d01'].nc.CEN_LON
lat_0 = wrf['d01'].nc.CEN_LAT
llcrnrlat = wrf['d01'].lat().min() - 5
urcrnrlat = wrf['d01'].lat().max() + 5
llcrnrlon = wrf['d01'].lon().min() - 5
urcrnrlon = wrf['d01'].lon().max() + 5
plt.figure()
ax = plt.axes()
m = Basemap(projection=proj, llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat, \
llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, lat_1=lat_1, \
lat_2=lat_2, lat_0=lat_0, lon_0=lon_0, resolution='i')
#m.drawcoastlines(color='black', linewidth=2)
#m.drawcountries(linewidth=1.5)
m.bluemarble()
parallels = ticks.loose_label(llcrnrlat, urcrnrlat)
m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=14)
meridians = ticks.loose_label(llcrnrlon, urcrnrlon)
m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=14)
for i in range(1,len(dlist)+1):
xb = var_border(wrf['d%02d'%i].lon())
yb = var_border(wrf['d%02d'%i].lat())
x, y = m(xb,yb)
tx, ty = m(wrf['d%02d'%i].lon()[-1,0], wrf['d%02d'%i].lat()[-1,0]+0.5)
colors = ['lightblue', 'pink', 'lightgreen', 'lightsalmon', 'silver', 'khaki']
ax.plot(x,y, lw=2, c=colors[i-1])
ax.annotate('d%02d'%i, xy=(tx, ty), fontsize=16, color=colors[i-1])
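# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# the wrfout file name below is a placeholder assumption -- point it at a real
# WRF output file to run the example.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import os
    sample = 'wrfout_d01_sample.nc'  # hypothetical path, replace with a real wrfout file
    if os.path.exists(sample):
        wrf = get(sample)
        print('Variables:', wrf.variables()[:10])
        print('Dimensions (nt, nz, ny, nx):', wrf.dim())
        slp = wrf.SLP(tstep=0)  # sea-level pressure field at the first output time
        print('SLP range: %.1f - %.1f hPa' % (slp.min(), slp.max()))
        wrf.close()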
| gpl-2.0 |
dnc1994/Kaggle-Playground | models/xgb.py | 1 | 2544 | # -*- encoding:ISO-8859-1 -*-
import warnings
warnings.filterwarnings('ignore')
import time
import pandas as pd
import xgboost as xgb
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn import grid_search
import random
random.seed(2016)
def mean_squared_error_(ground_truth, predictions):
return mean_squared_error(ground_truth, predictions) ** 0.5
RMSE = make_scorer(mean_squared_error_, greater_is_better=False)
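# make_scorer with greater_is_better=False makes GridSearchCV maximise the negated RMSE,
# which is why the best CV score is reported below as -model.best_score_.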
def main(input='df_new_421.csv'):
start_time = time.time()
df_all = pd.read_csv(input, encoding='ISO-8859-1', index_col=0)
num_train = 74067
df_train = df_all.iloc[:num_train]
df_test = df_all.iloc[num_train:]
id_test = df_test['id']
y_train = df_train['relevance'].values
cols_to_drop = ['id', 'relevance']
for col in cols_to_drop:
try:
df_train.drop(col, axis=1, inplace=True)
df_test.drop(col, axis=1, inplace=True)
except:
continue
X_train = df_train[:]
X_test = df_test[:]
print('--- Features Set: %s minutes ---' % round(((time.time() - start_time) / 60), 2))
print('Number of Features: ', len(X_train.columns.tolist()))
# print(X_train.columns.tolist())
# exit(0)
clf = xgb.XGBRegressor(seed=2016)
# param_grid = {
# 'n_estimators': [300, 500],
# 'learning_rate': [0.05],
# 'max_depth': [5, 7],
# 'subsample': [0.7, 0.8],
# 'colsample_bytree': [0.75, 0.85],
# }
param_grid = {
'n_estimators': [500],
'learning_rate': [0.045, 0.05, 0.055],
'max_depth': [7, 9, 11, 13],
'subsample': [0.7, 0.75, 0.8],
'colsample_bytree': [0.8, 0.825, 0.85],
}
model = grid_search.GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=5, cv=10, verbose=20, scoring=RMSE)
model.fit(X_train, y_train)
print('--- Grid Search Completed: %s minutes ---' % round(((time.time() - start_time) / 60), 2))
print('Param grid:')
print(param_grid)
print('Best Params:')
print(model.best_params_)
print('Best CV Score:')
print(-model.best_score_)
y_pred = model.predict(X_test)
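    # Clip predictions to the valid relevance range [1.0, 3.0] used by the competition metric.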
for i in range(len(y_pred)):
if y_pred[i] < 1.0:
y_pred[i] = 1.0
if y_pred[i] > 3.0:
y_pred[i] = 3.0
pd.DataFrame({'id': id_test, 'relevance': y_pred}).to_csv('submission_xgb.csv', index=False)
print('--- Submission Generated: %s minutes ---' % round(((time.time() - start_time) / 60), 2))
if __name__ == '__main__':
main()
| mit |