repo_name | path | copies | size | content | license
---|---|---|---|---|---
mjudsp/Tsallis
|
sklearn/ensemble/tests/test_weight_boosting.py
|
58
|
17158
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
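# Added for clarity (not part of the original test suite): a minimal sketch of
# the SAMME.R per-class decision values that `_samme_proba` is expected to
# produce, following Zhu et al. The real helper also clips near-zero
# probabilities before taking the log; that step is omitted here, and the
# function name below is illustrative only.
def _samme_r_decision_sketch(proba, n_classes):
    log_proba = np.log(proba)
    return (n_classes - 1) * (log_proba -
                              log_proba.mean(axis=1)[:, np.newaxis])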
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
|
bsd-3-clause
|
sunil07t/e-mission-server
|
emission/analysis/intake/segmentation/trip_segmentation_methods/dwell_segmentation_time_filter.py
|
1
|
16915
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from past.utils import old_div
import logging
import attrdict as ad
import numpy as np
import pandas as pd
import datetime as pydt
# Our imports
import emission.analysis.point_features as pf
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.core.wrapper.location as ecwl
import emission.analysis.intake.segmentation.restart_checking as eaisr
class DwellSegmentationTimeFilter(eaist.TripSegmentationMethod):
def __init__(self, time_threshold, point_threshold, distance_threshold):
"""
Determines segmentation points for points that were generated using a
time filter (i.e. report points every n seconds). This will *not* work for
points generated using a distance filter because it expects to have a
cluster of points to detect the trip end, and with a distance filter,
we will not get updates while we are still.
At least on android, we can get updates at a different frequency than
the "n" specified above. In particular:
a) we can get updates more frequently than "n" if there are other apps
that are requesting updates frequently - for example, while using a routing app.
b) we can get updates less frequently than "n" if there are bad/low
accuracy points that are filtered out.
So we use a combination of a time filter and a "number of points"
filter to detect the trip end.
The time_threshold indicates the number of seconds that we need to be
still before a trip end is detected.
The point_threshold indicates the number of prior points (after
filtering) that we need to be still for before a trip end is detected
The distance_threshold indicates the radius of the circle used to
detect that we are still. If all the points within the
time_threshold AND all the points within the point_threshold are
within the distance_threshold of each other, then we are still.
"""
self.time_threshold = time_threshold
self.point_threshold = point_threshold
self.distance_threshold = distance_threshold
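    # Illustrative sketch (added; not part of the original implementation): the
    # stillness test that these three thresholds feed into, as applied later in
    # has_trip_ended(), amounts to
    #
    #   still = (last_time_window_distances.max() < self.distance_threshold and
    #            last_n_points_distances.max() < self.distance_threshold)
    #
    # where both distance series are measured from the current point; the
    # variable names above are illustrative only.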
def segment_into_trips(self, timeseries, time_query):
"""
Examines the timeseries database for a specific range and returns the
segmentation points. Note that the input is the entire timeseries and
the time range. This allows algorithms to use whatever combination of
data that they want from the sensor streams in order to determine the
segmentation points.
"""
filtered_points_pre_ts_diff_df = timeseries.get_data_df("background/filtered_location", time_query)
# Sometimes, we can get bogus points because data.ts and
# metadata.write_ts are off by a lot. If we don't do this, we end up
# appearing to travel back in time
# https://github.com/e-mission/e-mission-server/issues/457
filtered_points_df = filtered_points_pre_ts_diff_df[(filtered_points_pre_ts_diff_df.metadata_write_ts - filtered_points_pre_ts_diff_df.ts) < 1000]
filtered_points_df.reset_index(inplace=True)
transition_df = timeseries.get_data_df("statemachine/transition", time_query)
if len(transition_df) > 0:
logging.debug("transition_df = %s" % transition_df[["fmt_time", "transition"]])
else:
logging.debug("no transitions found. This can happen for continuous sensing")
self.last_ts_processed = None
logging.info("Last ts processed = %s" % self.last_ts_processed)
segmentation_points = []
last_trip_end_point = None
curr_trip_start_point = None
just_ended = True
prevPoint = None
for idx, row in filtered_points_df.iterrows():
currPoint = ad.AttrDict(row)
currPoint.update({"idx": idx})
logging.debug("-" * 30 + str(currPoint.fmt_time) + "-" * 30)
if curr_trip_start_point is None:
logging.debug("Appending currPoint because the current start point is None")
# segmentation_points.append(currPoint)
if just_ended:
if self.continue_just_ended(idx, currPoint, filtered_points_df):
# We have "processed" the currPoint by deciding to glom it
self.last_ts_processed = currPoint.metadata_write_ts
continue
# else:
sel_point = currPoint
logging.debug("Setting new trip start point %s with idx %s" % (sel_point, sel_point.idx))
curr_trip_start_point = sel_point
just_ended = False
last5MinsPoints_df = filtered_points_df[np.logical_and(
np.logical_and(
filtered_points_df.ts > currPoint.ts - self.time_threshold,
filtered_points_df.ts < currPoint.ts),
filtered_points_df.ts >= curr_trip_start_point.ts)]
# Using .loc here causes problems if we have filtered out some points and so the index is non-consecutive.
# Using .iloc just ends up including points after this one.
# So we reset_index upstream and use it here.
# We are going to use the last 8 points for now.
# TODO: Change this back to last 10 points once we normalize phone and this
last10Points_df = filtered_points_df.iloc[max(idx-self.point_threshold, curr_trip_start_point.idx):idx+1]
distanceToLast = lambda row: pf.calDistance(ad.AttrDict(row), currPoint)
timeToLast = lambda row: currPoint.ts - ad.AttrDict(row).ts
last5MinsDistances = last5MinsPoints_df.apply(distanceToLast, axis=1)
logging.debug("last5MinsDistances = %s with length %d" % (last5MinsDistances.as_matrix(), len(last5MinsDistances)))
last10PointsDistances = last10Points_df.apply(distanceToLast, axis=1)
logging.debug("last10PointsDistances = %s with length %d, shape %s" % (last10PointsDistances.as_matrix(),
len(last10PointsDistances),
last10PointsDistances.shape))
# Fix for https://github.com/e-mission/e-mission-server/issues/348
last5MinTimes = last5MinsPoints_df.apply(timeToLast, axis=1)
logging.debug("len(last10PointsDistances) = %d, len(last5MinsDistances) = %d" %
(len(last10PointsDistances), len(last5MinsDistances)))
logging.debug("last5MinsTimes.max() = %s, time_threshold = %s" %
(last5MinTimes.max() if len(last5MinTimes) > 0 else np.NaN, self.time_threshold))
if self.has_trip_ended(prevPoint, currPoint, timeseries, last10PointsDistances, last5MinsDistances, last5MinTimes):
(ended_before_this, last_trip_end_point) = self.get_last_trip_end_point(filtered_points_df,
last10Points_df, last5MinsPoints_df)
segmentation_points.append((curr_trip_start_point, last_trip_end_point))
logging.info("Found trip end at %s" % last_trip_end_point.fmt_time)
# We have processed everything up to the trip end by marking it as a completed trip
self.last_ts_processed = currPoint.metadata_write_ts
if ended_before_this:
# in this case, we end a trip at the previous point, and the next trip starts at this
# point, not the next one
just_ended = False
prevPoint = currPoint
curr_trip_start_point = currPoint
logging.debug("Setting new trip start point %s with idx %s" %
(currPoint, currPoint.idx))
else:
# We end a trip at the current point, and the next trip starts at the next point
just_ended = True
prevPoint = None
else:
prevPoint = currPoint
logging.debug("Iterated over all points, just_ended = %s, len(transition_df) = %s" %
(just_ended, len(transition_df)))
if not just_ended and len(transition_df) > 0:
stopped_moving_after_last = transition_df[(transition_df.ts > currPoint.ts) & (transition_df.transition == 2)]
logging.debug("looking after %s, found transitions %s" %
(currPoint.ts, stopped_moving_after_last))
if len(stopped_moving_after_last) > 0:
(unused, last_trip_end_point) = self.get_last_trip_end_point(filtered_points_df,
last10Points_df, None)
segmentation_points.append((curr_trip_start_point, last_trip_end_point))
logging.debug("Found trip end at %s" % last_trip_end_point.fmt_time)
# We have processed everything up to the trip end by marking it as a completed trip
self.last_ts_processed = currPoint.metadata_write_ts
return segmentation_points
def continue_just_ended(self, idx, currPoint, filtered_points_df):
"""
Normally, since the logic here and the
logic on the phone are the same, if we have detected a trip
end, any points after this are part of the new trip.
However, in some circumstances, notably in my data from 27th
August, there appears to be a mismatch and we get a couple of
points past the end that we detected here. So let's look for
points that are within the distance filter, and are at a
delta of a minute, and join them to the just ended trip instead of using them to
start the new trip
:param idx: Index of the current point
:param currPoint: current point
:param filtered_points_df: dataframe of filtered points
:return: True if we should continue the just ended trip, False otherwise
"""
if idx == 0:
return False
else:
prev_point = ad.AttrDict(filtered_points_df.iloc[idx - 1])
logging.debug("Comparing with prev_point = %s" % prev_point)
if pf.calDistance(prev_point, currPoint) < self.distance_threshold and \
currPoint.ts - prev_point.ts <= 60:
logging.info("Points %s and %s are within the distance filter and only 1 min apart so part of the same trip" %
(prev_point, currPoint))
return True
else:
return False
def has_trip_ended(self, prev_point, curr_point, timeseries, last10PointsDistances, last5MinsDistances, last5MinTimes):
# Another mismatch between phone and server. Phone stops tracking too soon,
# so the distance is still greater than the threshold at the end of the trip.
# But then the next point is a long time away, so we can split again (similar to a distance filter)
if prev_point is None:
logging.debug("prev_point is None, continuing trip")
else:
timeDelta = curr_point.ts - prev_point.ts
distDelta = pf.calDistance(prev_point, curr_point)
if timeDelta > 0:
speedDelta = old_div(distDelta, timeDelta)
else:
speedDelta = np.nan
speedThreshold = old_div(float(self.distance_threshold), self.time_threshold)
if eaisr.is_tracking_restarted_in_range(prev_point.ts, curr_point.ts, timeseries):
logging.debug("tracking was restarted, ending trip")
return True
ongoing_motion_check = len(eaisr.get_ongoing_motion_in_range(prev_point.ts, curr_point.ts, timeseries)) > 0
if timeDelta > 2 * self.time_threshold and not ongoing_motion_check:
logging.debug("lastPoint.ts = %s, currPoint.ts = %s, threshold = %s, large gap = %s, ongoing_motion_in_range = %s, ending trip" %
(prev_point.ts, curr_point.ts,self.time_threshold, curr_point.ts - prev_point.ts, ongoing_motion_check))
return True
# http://www.huffingtonpost.com/hoppercom/the-worlds-20-longest-non-stop-flights_b_5994268.html
# Longest flight is 17 hours, which is the longest you can go without cell reception
# And even if you split an air flight that long into two, you will get some untracked time in the
# middle, so that's good.
TWELVE_HOURS = 12 * 60 * 60
if timeDelta > TWELVE_HOURS:
logging.debug("prev_point.ts = %s, curr_point.ts = %s, TWELVE_HOURS = %s, large gap = %s, ending trip" %
(prev_point.ts, curr_point.ts, TWELVE_HOURS, curr_point.ts - prev_point.ts))
return True
if (timeDelta > 2 * self.time_threshold and # We have been here for a while
speedDelta < speedThreshold): # we haven't moved very much
logging.debug("prev_point.ts = %s, curr_point.ts = %s, threshold = %s, large gap = %s, ending trip" %
(prev_point.ts, curr_point.ts,self.time_threshold, curr_point.ts - prev_point.ts))
return True
else:
logging.debug("prev_point.ts = %s, curr_point.ts = %s, time gap = %s (vs %s), distance_gap = %s (vs %s), speed_gap = %s (vs %s) continuing trip" %
(prev_point.ts, curr_point.ts,
timeDelta, self.time_threshold,
distDelta, self.distance_threshold,
speedDelta, speedThreshold))
# The -30 is a fuzz factor intended to compensate for older clients
# where data collection stopped after 5 mins, so that we never actually
# see 5 mins of data
if (len(last10PointsDistances) < self.point_threshold - 1 or
len(last5MinsDistances) == 0 or
last5MinTimes.max() < self.time_threshold - 30):
logging.debug("Too few points to make a decision, continuing")
return False
# Normal end-of-trip case
logging.debug("last5MinsDistances.max() = %s, last10PointsDistance.max() = %s" %
(last5MinsDistances.max(), last10PointsDistances.max()))
if (last5MinsDistances.max() < self.distance_threshold and
last10PointsDistances.max() < self.distance_threshold):
return True
def get_last_trip_end_point(self, filtered_points_df, last10Points_df, last5MinsPoints_df):
ended_before_this = last5MinsPoints_df is None or len(last5MinsPoints_df) == 0
if ended_before_this:
logging.debug("trip end transition, so last 10 points are %s" % last10Points_df.index)
last10PointsMedian = np.median(last10Points_df.index)
last_trip_end_index = int(last10PointsMedian)
logging.debug("last5MinsPoints not found, last_trip_end_index = %s" % last_trip_end_index)
else:
last10PointsMedian = np.median(last10Points_df.index)
last5MinsPointsMedian = np.median(last5MinsPoints_df.index)
last_trip_end_index = int(min(last5MinsPointsMedian, last10PointsMedian))
logging.debug("last5MinsPoints and last10PointsMedian found, last_trip_end_index = %s" % last_trip_end_index)
# logging.debug("last5MinPoints.median = %s (%s), last10Points_df = %s (%s), sel index = %s" %
# (np.median(last5MinsPoints_df.index), last5MinsPoints_df.index,
# np.median(last10Points_df.index), last10Points_df.index,
# last_trip_end_index))
last_trip_end_point_row = filtered_points_df.iloc[last_trip_end_index]
last_trip_end_point = ad.AttrDict(filtered_points_df.iloc[last_trip_end_index])
logging.debug("Appending last_trip_end_point %s with index %s " %
(last_trip_end_point, last_trip_end_point_row.name))
return (ended_before_this, last_trip_end_point)
|
bsd-3-clause
|
clemkoa/scikit-learn
|
examples/svm/plot_svm_margin.py
|
88
|
2540
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
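    # (Derivation added for clarity: the geometric margin is m = 1 / ||w||, and
    # for a line of slope a, a perpendicular offset of m corresponds to a
    # vertical offset of m / cos(arctan(a)) = m * sqrt(1 + a**2).)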
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
PanDAWMS/panda-server
|
pandaserver/test/testEvgen.py
|
1
|
1834
|
import sys
import time
import uuid
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec
if len(sys.argv)>1:
site = sys.argv[1]
else:
site = None
datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = None
jobList = []
for i in range(1):
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_%d" % (str(uuid.uuid4()),i)
job.AtlasRelease = 'Atlas-14.1.0'
job.homepackage = 'AtlasProduction/14.1.0.3'
job.transformation = 'csc_evgen_trf.py'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 100
job.prodSourceLabel = 'test'
job.computingSite = site
job.cloud = 'US'
job.cmtConfig = 'i686-slc4-gcc34-opt'
file = FileSpec()
file.lfn = "%s.evgen.pool.root" % job.jobName
file.destinationDBlock = job.destinationDBlock
file.destinationSE = job.destinationSE
file.dataset = job.destinationDBlock
file.destinationDBlockToken = 'ATLASDATADISK'
file.type = 'output'
job.addFile(file)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.destinationDBlockToken = 'ATLASDATADISK'
fileOL.type = 'log'
job.addFile(fileOL)
job.jobParameters="5144 1 5000 1 CSC.005144.PythiaZee.py %s NONE NONE NONE" % file.lfn
jobList.append(job)
for i in range(1):
s,o = Client.submitJobs(jobList)
print("---------------------")
print(s)
for x in o:
print("PandaID=%s" % x[0])
|
apache-2.0
|
vsoch/repofish
|
analysis/wikipedia/1.run_pmc_match_models.py
|
1
|
1837
|
from glob import glob
import json
import pandas
import requests
import numpy
import pickle
import os
import re
home = os.environ["HOME"]
scripts = "%s/SCRIPT/repofish" %home
base = "%s/data/pubmed" %os.environ["LAB"]
articles_folder = "%s/articles" %base
outfolder = "%s/methods_match" %base
folders = glob("%s/*" %articles_folder)
words_vectors = "%s/analysis/models/vectors/methods_word2vec.tsv" %scripts
methods_vectors = "%s/analysis/models/method_vectors.tsv" %scripts
if not os.path.exists(outfolder):
os.mkdir(outfolder)
len(folders)
#6218
count = 0
for folder in folders:
print "Parsing %s" %(folder)
xmls = glob("%s/*.nxml" %folder)
count = count + len(xmls)
count
#1248536
# We will write a command to process xml files in each folder
count = 0
folder_count = 0
for folder in folders:
xmls = glob("%s/*.nxml" %folder)
for xml_file in xmls:
if count == 0:
jobfile = "%s/analysis/wikipedia/.job/%s_match_methods.job" %(scripts,folder_count)
filey = open(jobfile,"w")
elif count == 4000:
count=0
filey.close()
folder_count +=1
jobfile = "%s/analysis/wikipedia/.job/%s_match_methods.job" %(scripts,folder_count)
filey = open(jobfile,"w")
filey.writelines('python %s/analysis/wikipedia/1.pmc_match_models.py "%s" %s %s %s\n' %(scripts,xml_file,outfolder,words_vectors,methods_vectors))
count+=1
filey.close()
# Ask for 40 nodes: with 4000 jobs per launch that is 100 jobs per core; at 15 minutes per command this is 1500 minutes, or 25 hours
# Now submit jobs to launch, we can only do 50 at a time
jobfiles = glob("%s/analysis/wikipedia/.job/*.job" %scripts)
for jobfile in jobfiles:
os.system('launch -s %s -N 40 --runtime=26:00:00' %jobfile)
|
mit
|
harisbal/pandas
|
pandas/tests/tslibs/test_libfrequencies.py
|
5
|
4197
|
# -*- coding: utf-8 -*-
import pandas.util.testing as tm
from pandas.tseries import offsets
from pandas._libs.tslibs.frequencies import (get_rule_month,
_period_str_to_code,
INVALID_FREQ_ERR_MSG,
is_superperiod, is_subperiod)
def assert_aliases_deprecated(freq, expected, aliases):
assert isinstance(aliases, list)
assert (_period_str_to_code(freq) == expected)
for alias in aliases:
with tm.assert_raises_regex(ValueError, INVALID_FREQ_ERR_MSG):
_period_str_to_code(alias)
def test_get_rule_month():
result = get_rule_month('W')
assert (result == 'DEC')
result = get_rule_month(offsets.Week())
assert (result == 'DEC')
result = get_rule_month('D')
assert (result == 'DEC')
result = get_rule_month(offsets.Day())
assert (result == 'DEC')
result = get_rule_month('Q')
assert (result == 'DEC')
    result = get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert (result == 'DEC')
    result = get_rule_month('Q-JAN')
assert (result == 'JAN')
result = get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert (result == 'JAN')
result = get_rule_month('A-DEC')
assert (result == 'DEC')
result = get_rule_month('Y-DEC')
assert (result == 'DEC')
result = get_rule_month(offsets.YearEnd())
assert (result == 'DEC')
result = get_rule_month('A-MAY')
assert (result == 'MAY')
result = get_rule_month('Y-MAY')
assert (result == 'MAY')
result = get_rule_month(offsets.YearEnd(month=5))
assert (result == 'MAY')
def test_period_str_to_code():
assert (_period_str_to_code('A') == 1000)
assert (_period_str_to_code('A-DEC') == 1000)
assert (_period_str_to_code('A-JAN') == 1001)
assert (_period_str_to_code('Y') == 1000)
assert (_period_str_to_code('Y-DEC') == 1000)
assert (_period_str_to_code('Y-JAN') == 1001)
assert (_period_str_to_code('Q') == 2000)
assert (_period_str_to_code('Q-DEC') == 2000)
assert (_period_str_to_code('Q-FEB') == 2002)
assert_aliases_deprecated("M", 3000, ["MTH", "MONTH", "MONTHLY"])
assert (_period_str_to_code('W') == 4000)
assert (_period_str_to_code('W-SUN') == 4000)
assert (_period_str_to_code('W-FRI') == 4005)
assert_aliases_deprecated("B", 5000, ["BUS", "BUSINESS",
"BUSINESSLY", "WEEKDAY"])
assert_aliases_deprecated("D", 6000, ["DAY", "DLY", "DAILY"])
assert_aliases_deprecated("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"])
assert_aliases_deprecated("T", 8000, ["minute", "MINUTE", "MINUTELY"])
assert (_period_str_to_code('Min') == 8000)
assert_aliases_deprecated("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"])
assert_aliases_deprecated("L", 10000, ["MILLISECOND", "MILLISECONDLY"])
assert (_period_str_to_code('ms') == 10000)
assert_aliases_deprecated("U", 11000, ["MICROSECOND", "MICROSECONDLY"])
assert (_period_str_to_code('US') == 11000)
assert_aliases_deprecated("N", 12000, ["NANOSECOND", "NANOSECONDLY"])
assert (_period_str_to_code('NS') == 12000)
def test_is_superperiod_subperiod():
# input validation
assert not (is_superperiod(offsets.YearEnd(), None))
assert not (is_subperiod(offsets.MonthEnd(), None))
assert not (is_superperiod(None, offsets.YearEnd()))
assert not (is_subperiod(None, offsets.MonthEnd()))
assert not (is_superperiod(None, None))
assert not (is_subperiod(None, None))
assert (is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert (is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert (is_superperiod(offsets.Hour(), offsets.Minute()))
assert (is_subperiod(offsets.Minute(), offsets.Hour()))
assert (is_superperiod(offsets.Second(), offsets.Milli()))
assert (is_subperiod(offsets.Milli(), offsets.Second()))
assert (is_superperiod(offsets.Milli(), offsets.Micro()))
assert (is_subperiod(offsets.Micro(), offsets.Milli()))
assert (is_superperiod(offsets.Micro(), offsets.Nano()))
assert (is_subperiod(offsets.Nano(), offsets.Micro()))
|
bsd-3-clause
|
SiLab-Bonn/Scarce
|
scarce/examples/sensor_3D_weighting.py
|
1
|
3704
|
''' Example creating the weighting field and potential of a 3D pixel array.
.. NOTE::
        The weighting potential and field are only correct if the pixel is
surrounded by other pixels, thus `n_pixel_x = n_pixel_y = 3`
'''
import numpy as np
from scarce import (plot, sensor)
def sensor_3D():
n_pixel_x, n_pixel_y = 3, 3
width_x = 250.
width_y = 50.
radius = 6.
nD = 2 # Number of columns per pixel
n_eff = 1e12
temperature = 300
V_bias = -20.
V_readout = 0.
pot_descr, geom_descr = sensor.sensor_3D(n_eff=n_eff,
V_bias=V_bias,
V_readout=V_readout,
temperature=temperature,
n_pixel_x=n_pixel_x,
n_pixel_y=n_pixel_y,
width_x=width_x,
width_y=width_y,
radius=radius,
nD=nD,
selection='weighting',
resolution=80,
smoothing=0.5)
# Plot weighting potential and field in 2D and 1d
import matplotlib.pyplot as plt
fig = plt.figure()
plot.get_3D_sensor_plot(fig, width_x, width_y,
radius, nD,
n_pixel_x, n_pixel_y,
V_bias=1, V_readout=0,
pot_func=pot_descr.get_potential_smooth,
field_func=pot_descr.get_field,
# Comment in if you want to see the mesh
mesh=None, # potential.mesh,
                            title='Weighting potential and field of 3D sensor,'\
' %dx%d pixel matrix, numerical solution' % \
(n_pixel_x, n_pixel_y))
# Get line between readout and bias column
for x, y in geom_descr.get_ro_col_offsets():
if geom_descr.position_in_center_pixel(x, y):
x_ro, y_ro = x, y
break
for x, y in list(geom_descr.get_center_bias_col_offsets()) + geom_descr.get_edge_bias_col_offsets():
if geom_descr.position_in_center_pixel(x, y):
x_bias, y_bias = x, y
break
# Plot selected line between readout and bias column
N = 1000
x = np.linspace(x_ro, x_bias, N)
y = np.linspace(y_ro, y_bias, N)
# Deselect position that is within the columns
sel = ~geom_descr.position_in_column(x, y)
x, y = x[sel], y[sel]
ax = fig.get_axes()[0]
ax.plot(x, y, '-', color='black', linewidth=2)
plt.show()
# Plot weighting potential and field along selected line
phi_smooth = pot_descr.get_potential_smooth(x, y)
field = pot_descr.get_field(x, y)
position = np.sqrt(x ** 2 + y ** 2) # [um]
plt.plot(position, phi_smooth, color='blue', linewidth=2,
label='Potential')
plt.legend(loc=1)
plt.ylabel('Weighting potential [V]')
plt.twinx(plt.gca())
field_abs = np.sqrt(field[0] ** 2 + field[1] ** 2)
plt.plot(position, field_abs, color='red', linewidth=2, label='Field')
plt.grid()
plt.legend(loc=4)
plt.ylabel('Weighting field [V/cm]')
plt.show()
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
sensor_3D()
|
mit
|
beepee14/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
baseband-geek/singlepulse-visualizer
|
interactive/singlepulse_tools_dev.py
|
1
|
15025
|
#!/usr/bin/python
# DM Sigma Time (s) Sample Downfact
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from pulsar_tools import disp_delay
import math
from matplotlib.widgets import AxesWidget
class VertSlider(AxesWidget):
"""
A slider representing a floating point range
The following attributes are defined
*ax* : the slider :class:`matplotlib.axes.Axes` instance
*val* : the current slider value
*vline* : a :class:`matplotlib.lines.Line2D` instance
representing the initial value of the slider
*poly* : A :class:`matplotlib.patches.Polygon` instance
which is the slider knob
*valfmt* : the format string for formatting the slider text
*label* : a :class:`matplotlib.text.Text` instance
for the slider label
*closedmin* : whether the slider is closed on the minimum
*closedmax* : whether the slider is closed on the maximum
*slidermin* : another slider - if not *None*, this slider must be
greater than *slidermin*
*slidermax* : another slider - if not *None*, this slider must be
less than *slidermax*
*dragging* : allow for mouse dragging on slider
Call :meth:`on_changed` to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None,
slidermax=None, dragging=True, **kwargs):
"""
Create a slider from *valmin* to *valmax* in axes *ax*
*valinit*
The slider initial position
*label*
The slider label
*valfmt*
Used to format the slider value
*closedmin* and *closedmax*
Indicate whether the slider interval is closed
*slidermin* and *slidermax*
Used to constrain the value of this slider to the values
of other sliders.
additional kwargs are passed on to ``self.poly`` which is the
:class:`matplotlib.patches.Rectangle` which draws the slider
knob. See the :class:`matplotlib.patches.Rectangle` documentation
valid property names (e.g., *facecolor*, *edgecolor*, *alpha*, ...)
"""
AxesWidget.__init__(self, ax)
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axhspan(valmin, valinit, 0, 1, **kwargs)
self.vline = ax.axhline(valinit, 0, 1, color='r', lw=1)
self.valfmt = valfmt
ax.set_xticks([])
ax.set_ylim((valmin, valmax))
ax.set_yticks([])
ax.set_navigate(False)
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(0.5, 1.03, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='center')
self.valtext = ax.text(0.5, -0.03, valfmt % valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='center')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
def _update(self, event):
"""update the slider position"""
if self.ignore(event):
return
if event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event') or
(event.name == 'button_press_event' and
event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
val = event.ydata
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[1] = 0, val
xy[2] = 1, val
self.poly.xy = xy
self.valtext.set_text(self.valfmt % val)
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if not self.eventson:
return
for cid, func in self.observers.iteritems():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call *func* with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
def reset(self):
"""reset the slider to the initial value if needed"""
if (self.val != self.valinit):
self.set_val(self.valinit)
def loadfile(filename):
    if filename is None:
        print "No filename supplied to read..."
        return None
with open(filename) as f:
data = f.read()
data = data.split('\n')
while not data[-1]:
data = data[:-1]
return data
def obs_stats(time, flags):
# Not doing total time correctly, depends on last single pulse detection instead of observation time
flag_time = 0
for flag in flags:
flag_time+=(float(flag.split()[1])-float(flag.split()[0]))
print "%.2f seconds flagged from %.2f seconds of data (%.2f percent)" % ( flag_time, time[-1], flag_time/time[-1]*100)
def flagfile(basename, max_DM=2097.2, freq_l=0.169615, freq_h=0.200335, padding=3):
"""This function takes in a text file of bad 0 DM times and
writes out one flagged over the correct de-dispersive smearing
times, looking for overlaps along the way. There must be a text file named
basename.bad with rows indicating bad times for this to work. """
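    # Added illustration (not in the original): basename.bad is expected to
    # contain two columns of seconds per row, e.g.
    #   12.5    14.0
    #   300.2   305.7
    # disp_delay() appears to return the dispersive delay between freq_l and
    # freq_h in milliseconds, hence the division by 1000 below to get seconds.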
from subprocess import Popen
bads = np.genfromtxt(basename+'.bad')
i = 0 # initialize counter for new list
flags = []
for bad in bads:
start = bad[0] - (padding + disp_delay(freq1=freq_l, freq2=freq_h, DM=max_DM)/1000)
if start < 0:
start = 0
stop = bad[1] + padding
if len(flags) > 0:
if start <= flags[-1][1]:
flags[-1][1] = stop
else:
flags.append([start, stop])
else:
flags.append([start, stop])
np.savetxt(basename+'.flag', flags, fmt='%d')
Popen(['flag.sh', basename]).communicate()[0]
def max_nth_percent(n, data):
""" A function that returns the nth percent top value, planned use is for plotting
:param n: the percentile value desired for return
:param data: the iterable object searched through
:return: nth percent largest value
"""
import heapq
data=list(data)
n=float(n)
return heapq.nlargest(int(len(data)*(n/100.0)), data)[-1]
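# Worked example (added for clarity): max_nth_percent(10, range(1, 101)) keeps
# the int(100 * 0.10) = 10 largest values, [100, 99, ..., 91], and returns the
# last of them, 91 -- roughly the 90th percentile of the data.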
def singlepulse_plot(basename=None, DMvTime=1, StatPlots=False, raw = False, threshold=5.0, Downsamps=[20, 20, 20], colormap='cool' ):
"""Plots up the flagged data, should switch to using genfromtxt when I have the time"""
from matplotlib.widgets import RadioButtons, Slider
# if raw:
data_raw=loadfile(basename+'.singlepulse')[1:]
# flags = False
# else:
data_flag = loadfile(basename+'_flagged.singlepulse')[1:]
flags = loadfile(basename+'.flag')
DM_raw = [float(row.split()[0]) for row in data_raw if float(row.split()[1]) >= threshold]
Sigma_raw = [float(row.split()[1]) for row in data_raw if float(row.split()[1]) >= threshold]
Time_raw = [float(row.split()[2]) for row in data_raw if float(row.split()[1]) >= threshold]
Sample_raw = [int(row.split()[3]) for row in data_raw if float(row.split()[1]) >= threshold]
Downfact_raw = [int(row.split()[4]) for row in data_raw if float(row.split()[1]) >= threshold]
DM_flag = [float(row.split()[0]) for row in data_flag if float(row.split()[1]) >= threshold]
Sigma_flag = [float(row.split()[1]) for row in data_flag if float(row.split()[1]) >= threshold]
Time_flag = [float(row.split()[2]) for row in data_flag if float(row.split()[1]) >= threshold]
Sample_flag = [int(row.split()[3]) for row in data_flag if float(row.split()[1]) >= threshold]
Downfact_flag = [int(row.split()[4]) for row in data_flag if float(row.split()[1]) >= threshold]
Sigma_float_raw = [float(value) for value in Sigma_raw]
Downfact_float_raw = [float(value) for value in Downfact_raw]
Sigma_float_flag = [float(value) for value in Sigma_flag]
Downfact_float_flag = [float(value) for value in Downfact_flag]
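    # Illustrative alternative (not in the original): the per-row parsing above
    # could be done with genfromtxt, assuming the standard five-column
    # "DM Sigma Time Sample Downfact" layout noted at the top of this file
    # (the sigma-threshold filtering applied above is omitted here):
    #   cols = np.genfromtxt(basename + '_flagged.singlepulse', skip_header=1)
    #   DM_flag, Sigma_flag, Time_flag = cols[:, 0], cols[:, 1], cols[:, 2]
    #   Sample_flag, Downfact_flag = cols[:, 3].astype(int), cols[:, 4].astype(int)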
DM = DM_flag
Sigma = Sigma_flag
Time = Time_flag
Sample = Sample_flag
Downfact = Downfact_flag
Sigma_float = Sigma_float_flag
Downfact_float = Downfact_float_flag
fig = plt.figure()
cm = plt.cm.get_cmap(colormap)
def onpick(event):
points = event.artist
ind = event.ind
mouseevent = event.mouseevent
print '\n'
print "Information for data points around click event %.4f, %.4f:" % (mouseevent.xdata, mouseevent.ydata)
for i in ind:
if ( DM[i] < 150):
boxcar = Downfact[i]
elif ( 150<= DM[i] < 823.2 ):
boxcar = Downfact[i] * Downsamps[0]/10
elif ( 823.2 <= DM[i] < 1486.2):
boxcar = Downfact[i] * Downsamps[1]/10
            elif ( 1486.2 <= DM[i] < 2100):
boxcar = Downfact[i] * Downsamps[2]/10
print "%.2f seconds, %.2f Sigma event detected at a DM of %.2f with a boxcar of: %d ms" % (Time[i], Sigma[i], DM[i], boxcar)
if StatPlots:
ax0 = fig.add_subplot(231)
plt.hist(Sigma, histtype='step', bins=60)
ax0.set_xlabel('Signal-to-Noise')
ax0.set_ylabel('Number of Pulses')
ax1 = fig.add_subplot(232)
plt.hist(DM, histtype='step', bins=int(0.5*len(set(DM))))
ax1.set_xlabel('DM (pc cm^-3)')
ax1.set_ylabel('Number of Pulses')
ax2 = fig.add_subplot(233)
sc0=ax2.scatter(DM, Sigma, c=Downfact_float, vmin=min(Downfact_float), vmax=max_nth_percent(10,Downfact_float), cmap='spectral', picker=1)
ax2.set_ylabel('Signal-to-Noise')
ax2.set_xlabel('DM (pc cm^-3)')
ax2.set_xlim([min(DM), max(DM)])
ax2.set_ylim([min(Sigma), max(Sigma)])
# plt.colorbar(sc0, label="Boxcar")
# fig.canvas.mpl_connect('pick_event', onpick)
ax3 = fig.add_subplot(212)
else:
ax3 = fig.add_subplot(111)
# ax3.set_title("Single Pulse Sigma")
ax3.set_xlabel('Time (s)')
ax3.set_ylabel('DM (pc cm^-3)')
ax3.set_ylim([min(DM), max(DM)])
ax3.set_xlim([min(Time), max(Time)])
# cm = plt.cm.get_cmap(colormap)
sc=ax3.scatter(Time,DM, c=Sigma_float, vmin=min(Sigma_float), vmax=max_nth_percent(10,Sigma_float), cmap=cm, picker=1)
# leg = ax1.legend()
plt.colorbar(sc, label="Signal-to-Noise")
rax = plt.axes([0.85, 0.02, 0.13, 0.13])
radio = RadioButtons(rax, ('Flagged', 'Raw'))
def datafunc(label):
        plot_argument_dict = {'Flagged': (Time_flag, DM_flag, Sigma_float_flag),
                              'Raw': (Time_raw, DM_raw, Sigma_float_raw)}
        t, d, s = plot_argument_dict[label]
        sc = ax3.scatter(t, d, c=s, vmin=min(s), vmax=max(s), cmap=cm, picker=1)
# ydata = hzdict[label]
# ax3.set_ydata(ydata)
plt.draw()
radio.on_clicked(datafunc)
axmin = plt.axes([0.85, 0.2, 0.04, 0.3])
axmax = plt.axes([0.93, 0.2, 0.04, 0.3])
smin = VertSlider(axmin, 'Min', min(Sigma_float), max(Sigma_float), valinit=min(Sigma_float))
smax = VertSlider(axmax, 'Max', min(Sigma_float), max(Sigma_float), valinit=max(Sigma_float))
def update(val):
sc.set_clim(smin.val, smax.val)
fig.canvas.draw()
# smin.on_changed(update)
smax.on_changed(update)
if not raw:
for flag in flags:
flag_area = matplotlib.patches.Rectangle((float(flag.split()[0]), min(DM)), float(flag.split()[1])-float(flag.split()[0]), max(DM)-min(DM), edgecolor='0', facecolor='0.66')
ax3.add_patch(flag_area)
fig.canvas.mpl_connect('pick_event', onpick)
'''
ax2 = fig.add_subplot(122)
ax2.set_title("Single Pulse Boxcar")
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('DM (pc cm^-3)')
cm = plt.cm.get_cmap('RdYlBu')
sc2=ax2.scatter(Time,DM, c=Downfact_float, vmin=min(Downfact_float), vmax=max(Downfact_float), cmap=cm)
# leg = ax1.legend()
plt.colorbar(sc2)
if not raw:
for flag in flags:
flag_area = matplotlib.patches.Rectangle((float(flag.split()[0]), min(DM)), float(flag.split()[1])-float(flag.split()[0]), max(DM)-min(DM), edgecolor='0', facecolor='0.66')
ax2.add_patch(flag_area)
'''
fig.suptitle('Single Pulse Search results for '+basename)
plt.show()
obs_stats(Time, flags)
def slice(infile, dm=None, timerange=None, sigma=None, downfact=None):
# Not properly implemented yet
data = read_singlepulse(infile)
slices = [None]*5
slice_map = {'dm':0, 'sigma':1, 'timerange':2, 'sample':3, 'downfact':4}
DM = [float(row.split()[0]) for row in data]
Sigma = [float(row.split()[1]) for row in data]
Time = [float(row.split()[2]) for row in data]
Sample = [int(row.split()[3]) for row in data]
Downfact = [int(row.split()[4]) for row in data]
if dm:
    if isinstance(dm, (int, float)):
        data = [row for row in data if dm <= float(row.split()[0])]
    elif isinstance(dm, list):
        data = [row for row in data if dm[0] <= float(row.split()[0]) <= dm[1]]
if sigma:
    if isinstance(sigma, (int, float)):
        data = [row for row in data if sigma <= float(row.split()[1])]
    elif isinstance(sigma, list):
        data = [row for row in data if sigma[0] <= float(row.split()[1]) <= sigma[1]]
|
mit
|
jemromerol/apasvo
|
apasvo/utils/plotting.py
|
1
|
5629
|
# encoding: utf-8
'''
@author: Jose Emilio Romero Lopez
@copyright: Copyright 2013-2014, Jose Emilio Romero Lopez.
@license: GPL
@contact: [email protected]
This file is part of APASVO.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from matplotlib import mlab
from scipy import signal
import numpy as np
SPECGRAM_WINDOWS = ("boxcar", "hamming", "hann", "bartlett",
'blackman', "blackmanharris")
SPECGRAM_WINDOWS_NAMES = ("Rectangular", "Hamming", "Hann", "Bartlett",
"Blackman", "Blackman-Harris")
def plot_specgram(ax, data, fs, nfft=256, noverlap=128, window='hann',
cmap='jet', interpolation='bilinear', rasterized=True):
if window not in SPECGRAM_WINDOWS:
raise ValueError("Window not supported")
elif window == "boxcar":
mwindow = signal.boxcar(nfft)
elif window == "hamming":
mwindow = signal.hamming(nfft)
elif window == "hann":
mwindow = signal.hann(nfft)
elif window == "bartlett":
mwindow = signal.bartlett(nfft)
elif window == "blackman":
mwindow = signal.blackman(nfft)
elif window == "blackmanharris":
mwindow = signal.blackmanharris(nfft)
specgram, freqs, time = mlab.specgram(data, NFFT=nfft, Fs=fs,
window=mwindow,
noverlap=noverlap)
specgram = 10 * np.log10(specgram[1:, :])
specgram = np.flipud(specgram)
freqs = freqs[1:]
halfbin_time = (time[1] - time[0]) / 2.0
halfbin_freq = (freqs[1] - freqs[0]) / 2.0
extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
freqs[0] - halfbin_freq, freqs[-1] + halfbin_freq)
ax.imshow(specgram, cmap=cmap, interpolation=interpolation,
extent=extent, rasterized=rasterized)
ax.axis('tight')
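# Illustrative usage sketch (not part of the original module): how plot_specgram
# might be called on a synthetic chirp. The 1 kHz sampling rate, 5-100 Hz sweep
# and Hamming window are arbitrary assumptions for demonstration only.
def _demo_plot_specgram():
    import matplotlib.pyplot as plt
    fs = 1000.0
    t = np.arange(0.0, 10.0, 1.0 / fs)
    data = signal.chirp(t, f0=5.0, t1=10.0, f1=100.0)  # linear 5 -> 100 Hz sweep
    fig, ax = plt.subplots()
    plot_specgram(ax, data, fs, nfft=256, noverlap=128, window='hamming')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Frequency (Hz)')
    plt.show()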
def reduce_data(x, y, width, xmin=0, xmax=None):
"""Given x-axis data and y-axis data returns a smaller representation
of both datasets with a desired length for faster plotting.
Given a width value, which usually corresponds to the desired pixel width
of the plot, this splits the represented x-data range into 'width' partitions and
returns the (x, y) minimum and maximum data pairs for each partition.
Args:
x: x-axis data. Numpy array type.
y: y-axis data. Numpy array type.
width: Desired plot width, usually related to plot's pixel width.
xmin: Position of the first (x,y) data pair to be represented
xmax: Position of the last (x,y) data pair to be represented
Returns:
x_reduced: Reduced x-axis dataset.
y_reduced: Reduced y-axis dataset.
"""
if len(x) != len(y):
raise ValueError("x and y must have the same length.")
if not isinstance(x, np.ndarray):
x = np.array(x)
if not isinstance(y, np.ndarray):
y = np.array(y)
# Init xmax and xmin values
length = len(x)
xmax = xmax if xmax is not None else length - 1
xmax = min(length - 1, xmax if xmax > 0 else 0)
xmin = max(0, xmin if xmin < length else length - 1)
if xmin > xmax:
raise ValueError("xmax must be greater or equal than xmin")
n_points = 2 * width
data_size = xmax - xmin
# If the length of the datasets is too small returns the datasets
if data_size <= n_points:
return x[xmin:xmax + 1], y[xmin:xmax + 1]
indexes = np.empty(n_points + 2, dtype=int)
# Initial and final (x,y) pairs of the reduced data corresponds
# with the initial and final (x,y) values of the represented data
indexes[0], indexes[-1] = xmin, xmax
i = 1
limits = np.ceil(np.linspace(xmin, xmax, width + 1)).astype(int)
for j in xrange(int(width)):
left = limits[j]
right = limits[j + 1]
indexes[i] = left + y[left:right + 1].argmax(axis=0)
i += 1
indexes[i] = left + y[left:right + 1].argmin(axis=0)
i += 1
indexes.sort()
return x[indexes], y[indexes]
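# Illustrative usage sketch (not part of the original module): reduce a long,
# noisy sine to roughly 2*width (x, y) pairs, which is all a plot of that pixel
# width can resolve. The sample count and noise level are arbitrary assumptions.
def _demo_reduce_data(width=800):
    x = np.linspace(0.0, 100.0, 100000)
    y = np.sin(x) + 0.05 * np.random.randn(x.size)
    x_red, y_red = reduce_data(x, y, width)
    print("reduced %d points to %d" % (len(x), len(x_red)))
    return x_red, y_red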
def adjust_axes_height(ax, max_value=None, min_value=None, margin=0.1):
max_values = [max_value] if max_value else []
min_values = [min_value] if min_value else []
for line in ax.lines:
try:
xdata = list(line.get_xdata())
ydata = list(line.get_ydata())
except TypeError:
continue
if len(xdata) == 2 and len(ydata) == 2:
# Check for horizontal lines and discard
if xdata == [0, 1] and ydata[0] == ydata[1]:
continue
# Check for vertical lines and discard
if ydata == [0, 1] and xdata[0] == xdata[1]:
continue
else:
max_values.append(max(ydata))
min_values.append(min(ydata))
if max_values and min_values:
maximum = max(max_values)
minimum = min(min_values)
margin_height = (maximum - minimum) * margin
ax.set_ylim(minimum - margin_height, maximum + margin_height)
|
gpl-3.0
|
waterponey/scikit-learn
|
examples/linear_model/plot_robust_fit.py
|
147
|
3050
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction.
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to TheilSen
  and RANSAC because it does not attempt to completely filter out the outliers
  but only to lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
bsd-3-clause
|
Tjorriemorrie/trading
|
19_rf_kelly/predictor.py
|
1
|
1399
|
import logging as log
import pandas as pd
import numpy as np
from pprint import pprint
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.externals import joblib
from main import DATA, loadData, getLabels, splitAndScale, addEwma, addRsi
FIBOS = [5, 13, 34]
def main():
for item in DATA:
df = loadData(item['currency'], item['timeframe'])
# print df
# adding indicators
addEwma(df, FIBOS)
addRsi(df, FIBOS)
# set labels
labels = getLabels(df)
# print labels
# print df.tail()
# print labels.tail()
# split and scale
X_train, X_test, y_train, y_test = splitAndScale(df, labels)
# print X_test
# print y_test
# loading regressor
clf = joblib.load('models/{0}.gbrt'.format(item['currency']))
log.info('Classifier loaded')
# predict last day
prediction = clf.predict(X_test[-1])
predict_proba = clf.predict_proba(X_test[-1])
log.warn('{0} {1} {2} {3}'.format(df.ix[-1].name, item['currency'], prediction[0], max(predict_proba[0])))
if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# args = parser.parse_args()
log.basicConfig(
level=log.WARN,
format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
# datefmt='%Y-%m-%d %H:%M:',
)
main()
|
mit
|
cauchycui/scikit-learn
|
sklearn/lda.py
|
56
|
17706
|
"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or float type')
return s
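# Illustrative sketch (not part of the original module): the effect of the
# shrinkage argument on the covariance estimated from a small random sample.
# The sample size, dimensionality and seed are arbitrary assumptions.
def _shrinkage_demo(n_samples=20, n_features=10, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    s_empirical = _cov(X, shrinkage=None)   # plain empirical covariance
    s_auto = _cov(X, shrinkage='auto')      # Ledoit-Wolf shrinkage
    s_fixed = _cov(X, shrinkage=0.5)        # fixed shrinkage parameter
    return s_empirical, s_auto, s_fixed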
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__() "
              "method in version 0.16 and will be removed from "
              "fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
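# the in-place operations below map the decision values through the
# logistic sigmoid 1 / (1 + exp(-f(x)))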
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
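# Illustrative usage sketch (not part of the original module): the shrinkage
# discussed in the Notes section only applies to the 'lsqr' and 'eigen'
# solvers; the toy data mirrors the docstring example.
if __name__ == '__main__':
    X_demo = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y_demo = np.array([1, 1, 1, 2, 2, 2])
    clf_demo = LDA(solver='lsqr', shrinkage='auto')
    clf_demo.fit(X_demo, y_demo)
    print(clf_demo.predict([[-0.8, -1]]))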
|
bsd-3-clause
|
1iyiwei/pyml
|
code/optional-py-scripts/ch10.py
|
2
|
14374
|
# Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 10 - Predicting Continuous Target Variables with Regression Analysis
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
from sklearn.cross_validation import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# Added version check for recent scikit-learn 0.18 checks
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
#############################################################################
print(50 * '=')
print('Section: Exploring the Housing dataset')
print(50 * '-')
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/'
'housing/housing.data',
header=None,
sep='\s+')
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
print('Dataset excerpt:\n\n', df.head())
#############################################################################
print(50 * '=')
print('Section: Visualizing the important characteristics of a dataset')
print(50 * '-')
sns.set(style='whitegrid', context='notebook')
cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols], size=2.5)
# plt.tight_layout()
# plt.savefig('./figures/scatter.png', dpi=300)
plt.show()
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.5)
hm = sns.heatmap(cm,
cbar=True,
annot=True,
square=True,
fmt='.2f',
annot_kws={'size': 15},
yticklabels=cols,
xticklabels=cols)
# plt.tight_layout()
# plt.savefig('./figures/corr_mat.png', dpi=300)
plt.show()
sns.reset_orig()
#############################################################################
print(50 * '=')
print('Section: Solving regression for regression'
' parameters with gradient descent')
print(50 * '-')
class LinearRegressionGD(object):
def __init__(self, eta=0.001, n_iter=20):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
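# batch gradient descent on the sum-of-squared-errors cost J(w) = 1/2 * sum((y - Xw)^2):
# each pass updates w[1:] += eta * X^T (y - Xw) and the bias w[0] += eta * sum(y - Xw)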
for i in range(self.n_iter):
output = self.net_input(X)
errors = (y - output)
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
cost = (errors**2).sum() / 2.0
self.cost_.append(cost)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return self.net_input(X)
X = df[['RM']].values
y = df['MEDV'].values
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
y_std = sc_y.fit_transform(y)
lr = LinearRegressionGD()
lr.fit(X_std, y_std)
plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
# plt.tight_layout()
# plt.savefig('./figures/cost.png', dpi=300)
plt.show()
def lin_regplot(X, y, model):
plt.scatter(X, y, c='lightblue')
plt.plot(X, model.predict(X), color='red', linewidth=2)
return
lin_regplot(X_std, y_std, lr)
plt.xlabel('Average number of rooms [RM] (standardized)')
plt.ylabel('Price in $1000\'s [MEDV] (standardized)')
# plt.tight_layout()
# plt.savefig('./figures/gradient_fit.png', dpi=300)
plt.show()
print('Slope: %.3f' % lr.w_[1])
print('Intercept: %.3f' % lr.w_[0])
num_rooms_std = sc_x.transform([5.0])
price_std = lr.predict(num_rooms_std)
print("Price in $1000's: %.3f" % sc_y.inverse_transform(price_std))
#############################################################################
print(50 * '=')
print('Section: Estimating the coefficient of a'
' regression model via scikit-learn')
print(50 * '-')
slr = LinearRegression()
slr.fit(X, y)
y_pred = slr.predict(X)
print('Slope: %.3f' % slr.coef_[0])
print('Intercept: %.3f' % slr.intercept_)
lin_regplot(X, y, slr)
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
# plt.tight_layout()
# plt.savefig('./figures/scikit_lr_fit.png', dpi=300)
plt.show()
# adding a column vector of "ones"
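# closed-form (normal equation) least-squares solution: w = (Xb^T Xb)^-1 Xb^T y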
Xb = np.hstack((np.ones((X.shape[0], 1)), X))
w = np.zeros(X.shape[1])
z = np.linalg.inv(np.dot(Xb.T, Xb))
w = np.dot(z, np.dot(Xb.T, y))
print('Slope: %.3f' % w[1])
print('Intercept: %.3f' % w[0])
#############################################################################
print(50 * '=')
print('Section: Fitting a robust regression model using RANSAC')
print(50 * '-')
if Version(sklearn_version) < '0.18':
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
residual_metric=lambda x: np.sum(np.abs(x), axis=1),
residual_threshold=5.0,
random_state=0)
else:
ransac = RANSACRegressor(LinearRegression(),
max_trials=100,
min_samples=50,
loss='absolute_loss',
residual_threshold=5.0,
random_state=0)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],
c='blue', marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],
c='lightgreen', marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='red')
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('./figures/ransac_fit.png', dpi=300)
plt.show()
print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_)
#############################################################################
print(50 * '=')
print('Section: Evaluating the performance of linear regression models')
print(50 * '-')
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0)
slr = LinearRegression()
slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)
plt.scatter(y_train_pred, y_train_pred - y_train,
c='blue', marker='o', label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test,
c='lightgreen', marker='s', label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
# plt.tight_layout()
# plt.savefig('./figures/slr_residuals.png', dpi=300)
plt.show()
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
#############################################################################
print(50 * '=')
print('Section: Using regularized methods for regression')
print(50 * '-')
print('LASSO Coefficients')
lasso = Lasso(alpha=0.1)
lasso.fit(X_train, y_train)
y_train_pred = lasso.predict(X_train)
y_test_pred = lasso.predict(X_test)
print(lasso.coef_)
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
#############################################################################
print(50 * '=')
print('Section: Turning a linear regression model into a curve'
' - polynomial regression')
print(50 * '-')
X = np.array([258.0, 270.0, 294.0,
320.0, 342.0, 368.0,
396.0, 446.0, 480.0, 586.0])[:, np.newaxis]
y = np.array([236.4, 234.4, 252.8,
298.6, 314.2, 342.2,
360.8, 368.0, 391.2,
390.8])
lr = LinearRegression()
pr = LinearRegression()
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)
# fit linear features
lr.fit(X, y)
X_fit = np.arange(250, 600, 10)[:, np.newaxis]
y_lin_fit = lr.predict(X_fit)
# fit quadratic features
pr.fit(X_quad, y)
y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))
# plot results
plt.scatter(X, y, label='training points')
plt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')
plt.plot(X_fit, y_quad_fit, label='quadratic fit')
plt.legend(loc='upper left')
# plt.tight_layout()
# plt.savefig('./figures/poly_example.png', dpi=300)
plt.show()
y_lin_pred = lr.predict(X)
y_quad_pred = pr.predict(X_quad)
print('Training MSE linear: %.3f, quadratic: %.3f' % (
mean_squared_error(y, y_lin_pred),
mean_squared_error(y, y_quad_pred)))
print('Training R^2 linear: %.3f, quadratic: %.3f' % (
r2_score(y, y_lin_pred),
r2_score(y, y_quad_pred)))
#############################################################################
print(50 * '=')
print('Section: Modeling nonlinear relationships in the Housing Dataset')
print(50 * '-')
X = df[['LSTAT']].values
y = df['MEDV'].values
regr = LinearRegression()
# create quadratic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)
# fit features
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))
# plot results
plt.scatter(X, y, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label='linear (d=1), $R^2=%.2f$' % linear_r2,
color='blue',
lw=2,
linestyle=':')
plt.plot(X_fit, y_quad_fit,
label='quadratic (d=2), $R^2=%.2f$' % quadratic_r2,
color='red',
lw=2,
linestyle='-')
plt.plot(X_fit, y_cubic_fit,
label='cubic (d=3), $R^2=%.2f$' % cubic_r2,
color='green',
lw=2,
linestyle='--')
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
plt.legend(loc='upper right')
# plt.tight_layout()
# plt.savefig('./figures/polyhouse_example.png', dpi=300)
plt.show()
print('Transforming the dataset')
X = df[['LSTAT']].values
y = df['MEDV'].values
# transform features
X_log = np.log(X)
y_sqrt = np.sqrt(y)
# fit features
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]
regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))
# plot results
plt.scatter(X_log, y_sqrt, label='training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label='linear (d=1), $R^2=%.2f$' % linear_r2,
color='blue',
lw=2)
plt.xlabel('log(% lower status of the population [LSTAT])')
plt.ylabel('$\sqrt{Price \; in \; \$1000\'s [MEDV]}$')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./figures/transform_example.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Decision tree regression')
print(50 * '-')
X = df[['LSTAT']].values
y = df['MEDV'].values
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
sort_idx = X.flatten().argsort()
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000\'s [MEDV]')
# plt.savefig('./figures/tree_regression.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Random forest regression')
print(50 * '-')
X = df.iloc[:, :-1].values
y = df['MEDV'].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=1)
forest = RandomForestRegressor(n_estimators=1000,
criterion='mse',
random_state=1,
n_jobs=-1)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
print('MSE train: %.3f, test: %.3f' % (
mean_squared_error(y_train, y_train_pred),
mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
r2_score(y_train, y_train_pred),
r2_score(y_test, y_test_pred)))
plt.scatter(y_train_pred,
y_train_pred - y_train,
c='black',
marker='o',
s=35,
alpha=0.5,
label='Training data')
plt.scatter(y_test_pred,
y_test_pred - y_test,
c='lightgreen',
marker='s',
s=35,
alpha=0.7,
label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='red')
plt.xlim([-10, 50])
# plt.tight_layout()
# plt.savefig('./figures/slr_residuals.png', dpi=300)
plt.show()
|
mit
|
glouppe/scikit-learn
|
examples/feature_selection/plot_feature_selection.py
|
95
|
2847
|
"""
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
|
bsd-3-clause
|
AnasGhrab/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
253
|
4158
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
kbase/data_api
|
lib/doekbase/data_api/interactive/display.py
|
5
|
12947
|
"""
Objects for displaying the results in the IPython notebook.
"""
__author__ = 'Dan Gunter <[email protected]>'
__date__ = '8/1/15'
# Stdlib
import logging
# Third-party
from IPython.display import display
import matplotlib.pyplot as plt
import pandas as pd
# Qgrid table display
try:
import qgrid
qgrid.nbinstall()
except ImportError:
qgrid = None
from jinja2 import Template
# Seaborn graphing
try:
import seaborn as sns
sns.set_style("whitegrid")
except ImportError:
sns = None
# Local
from doekbase.data_api.util import get_logger, log_start, log_end
_log = get_logger('doekbase.data_api.display')
_nbviewer = False
def nbviewer_mode(value=None):
"""Get/set the global nbviewer-friendly mode.
This is currently used to tell qgrid where to get
its JavaScript from (local or a CDN).
"""
global _nbviewer
if value is not None:
_nbviewer = bool(value)
return _nbviewer
class Table(pd.DataFrame):
"""Create a Table from the input data.
This is a thin wrapper around the Pandas DataFrame object.
"""
def _ipython_display(self):
if qgrid:
return qgrid.show_grid(self, remote_js=nbviewer_mode())
else:
print
return display(self)
class Contigs(Table):
def __init__(self, contigs):
"""Create contigset from list of strings.
Args:
contigs: List of strings for contigs
"""
Table.__init__(self, {'ids': contigs})
class TemplateMixin(object):
template = ''
def __init__(self):
self._template = Template(self.template)
def render(self, *args, **kwargs):
return self._template.render(*args, **kwargs)
class Classification(TemplateMixin):
"""Taxonomic classification.
Attributes:
taxon (TaxonAPI): base taxon
name (str): Scientific name
children (list of TaxonAPI): List of TaxonAPI objects
    parents (list of TaxonAPI): List of parent TaxonAPI objects
"""
template = '''{% for name in classification %}
<span style="margin-left: {{ loop.index0 * 10 }}px">
<span style="font-size: 50%">></span> {{ name }}
</span><br>{% endfor %}'''
def __init__(self, obj):
"""Create from a taxon.
Args:
obj: TaxonAPI object or object with `get_taxon`.
"""
TemplateMixin.__init__(self)
self.taxon = obj.get_taxon() if hasattr(obj, 'get_taxon') else obj
self.classification = self.taxon.get_scientific_lineage().split(';')
self.name = self.taxon.get_scientific_name()
# self.children = self.taxon.get_children() or []
# tx, self.parents = self.taxon, []
# while tx:
# tx = tx.get_parent()
# if tx:
# self.parents.insert(tx.get_scientific_name(), 0)
# self.classification = self.parents + [self.name] + [
# child.get_scientific_name() for child in self.children]
def _repr_html_(self):
return self.render(classification=self.classification)
class Organism(TemplateMixin):
"""Summary of an organism as per ENSEMBL page, from
a TaxonAPI.
Attributes:
taxon (TaxonAPI): Taxon with info for organism
"""
template = '''<b>Taxonomy ID</b>: {{taxon.get_taxonomic_id()}}<br>
<b>Name</b>: {{taxon.get_scientific_name()}}<br>
<b>Aliases</b>:<br>
{% for a in taxon.get_aliases() %}
{{ a }}<br>
{% endfor %}
<b>Classification</b>:<br>''' + \
Classification.template
def __init__(self, obj):
"""Create from an API object.
Args:
obj: TaxonAPI object or object with `get_taxon`.
"""
TemplateMixin.__init__(self)
self.taxon = obj.get_taxon() if hasattr(obj, 'get_taxon') else obj
def _repr_html_(self):
if self.taxon is None:
return None
classf = Classification(self.taxon).classification
return self.render(classification=classf, taxon=self.taxon)
class AssemblyInfo(TemplateMixin):
"""Get information about assembly.
Attributes:
stats (dict): Statistics from `AssemblyAPI.get_stats()`
"""
template = '''<b>GC content</b>: {{gc_content}}<br>
<b>Total DNA sequence length</b>:{{dna_size}}<br>
<b>Number of contigs</b>:{{num_contigs}}'''
def __init__(self, obj):
"""Create assembly info.
Args:
obj: AssemblyAPI or object with `get_assembly` method.
"""
TemplateMixin.__init__(self)
if hasattr(obj, 'get_assembly'):
self.assembly = obj.get_assembly()
else:
self.assembly = obj
self.stats = self.assembly.get_stats()
def _repr_html_(self):
return self.render(self.stats)
class FeatureStats(Table):
"""Feature information for genomes
"""
def __init__(self, ga):
"""Create from a genome.
Args:
ga: GenomeAnnotation object
"""
data = []
for feature in ga.get_feature_types(): # all feature types
count = 0
# get lists of positions for each feature_id
feature_id_lists = ga.get_feature_ids_by_type([feature])
for fi, values in feature_id_lists.items():
count += len(values)
data.append((feature, count))
Table.__init__(self, data, columns=('feature_type', 'count'))
class FeaturePositions(Table):
"""The position (and ID and type) of features in the genome.
"""
def __init__(self, ga):
"""Create from a genome.
Args:
ga: GenomeAnnotation object
"""
data = self._get_features(ga)
Table.__init__(self, data, columns=('type', 'id', 'start', 'len', 'dir'))
def _get_features(self, ga):
"This should probably move into genome_annotation module"
from doekbase.data_api.object import ObjectAPI
fcr = 'feature_container_references'
refs = ga.get_data_subset(path_list=[fcr])[fcr]
result = []
for ref in refs.values():
obj = ObjectAPI(ga.services, ref) # fetch data
features = obj.get_data()['features']
for feat_id in features.keys(): # iterate features
ftype = features[feat_id]['type']
for loc in features[feat_id]['locations']:
# build an output row and add it to the result
row = (ftype, feat_id, loc[1], loc[3], loc[2])
result.append(row)
return result
def stripplot(self):
"""Make a 'stripplot' of all feature positions.
Requires the 'seaborn' library
"""
if sns is None:
raise NotImplementedError('Requires the "seaborn" library. See: '
'https://github.com/mwaskom/seaborn')
ax = sns.stripplot(x='start', y='type', data=self)
# get rid of spurious negative tick
ax.set_xlim(0, ax.get_xlim()[1])
return ax
class ProteinStats(Table):
"""Various statistics for proteins.
"""
STATS_LENGTH = 'length'
def __init__(self, ga, stats=[STATS_LENGTH]):
"""Create from a genome.
Args:
ga: GenomeAnnotation object
"""
proteins = ga.get_proteins()
data = {}
if self.STATS_LENGTH in stats:
data[self.STATS_LENGTH] = [
len(v['amino_acid_sequence'])
for v in proteins.values()]
Table.__init__(self, data)
def plot_protein_lengths(self):
return self.plot(x=self.STATS_LENGTH, kind='hist')
class GenomeSummary(TemplateMixin):
"""Summary of a GenomeAnnotation.
Attributes:
taxon (dict): Information about the taxonomic type
assembly (dict): Information about the contigs in the assembly
annotation (dict): Information about the genome annotation
data (dict): All the information as a single dict with the attributes
listed above as top-level keys.
"""
template = '<h3>Genome Summary</h3>'+ Organism.template
def __init__(self, ga, taxons=True, assembly=True, annotation=True):
"""Create new summary from GenomeAnnotation.
Args:
ga (GenomeAnnotation): input object
taxons: If False, do not retrieve taxons
assembly: If False, do not retrieve assembly
annotation: If False, do not retrieve annotation
"""
if not hasattr(ga, 'get_taxon') or not hasattr(ga, 'get_assembly'):
raise TypeError('{} is not a recognized GenomeAnnotation type.'
.format(type(ga)))
self.data = { 'taxon': {}, 'assembly': {}, 'annotation': {}}
if taxons:
self.data['taxon'] = self._get_taxon(ga)
if assembly:
self.data['assembly'] = self._get_assembly(ga)
if annotation:
self.data['annotation'] = self._get_annotation(ga)
self.ga = ga
self._set_attrs()
def _set_attrs(self):
"""Set attributes for top-level keys"""
for key, value in self.data.items():
setattr(self, key, value)
TemplateMixin.__init__(self)
@staticmethod
def _get_taxon(ga):
t0 = log_start(_log, 'get_taxon')
try:
taxon = ga.get_taxon()
except Exception as err:
raise RuntimeError('Cannot get taxon: {}'.format(err))
txn = { k: getattr(taxon, 'get_' + k)()
for k in ('taxonomic_id', 'kingdom', 'domain',
'genetic_code', 'scientific_name', 'aliases',
'scientific_lineage')}
txn['lineage_list'] = txn['scientific_lineage'].split(';')
log_end(_log, t0, 'get_taxon')
return txn
@staticmethod
def _get_assembly(ga):
t0 = log_start(_log, 'get_assembly')
try:
assembly = ga.get_assembly()
except Exception as err:
raise RuntimeError('Cannot get assembly: {}'.format(err))
asy = {
k1: getattr(assembly, 'get_' + k2)()
for k1, k2 in (
('number_of_contigs', 'number_contigs'),
('total_length', 'dna_size'),
('total_gc_content', 'gc_content'),
('contig_length', 'contig_lengths'),
('contig_gc_content', 'contig_gc_content')
)}
log_end(_log, t0, 'get_assembly')
return asy
@staticmethod
def _get_annotation(ga):
t0 = log_start(_log, 'get_annotation')
try:
feature_types = ga.get_feature_types()
except Exception as err:
raise RuntimeError('Cannot get feature_types: {}'.format(err))
ann = { 'feature_' + k: getattr(ga, 'get_feature_' + k)(feature_types)
for k in ('type_descriptions', 'type_counts')}
ann['feature_types'] = feature_types
log_end(_log, t0, 'get_annotation')
return ann
def summary_plots(self):
"""Show some plots summarizing the information in the Genome.
"""
# First plot: feature types and counts
n = sum(map(bool, self.data.keys()))
i = 1
plt.close()
if self.annotation:
plt.subplot(n, 1, i)
self._plot_feature_counts()
i += 1
# Second plot
if self.assembly:
plt.subplot(n, 1, i)
self._plot_contig_lengths()
i += 1
# Third plot
if self.assembly:
plt.subplot(n, 1, i)
self._plot_contig_gc()
i += 1
plt.show()
def _plot_feature_counts(self):
d = pd.DataFrame({'Feature Type': self.annotation['feature_types'],
'Count': self.annotation['feature_type_counts']})
ax = sns.barplot(x='Count', y='Feature Type', orient='h', data=d)
ax.set_title('Feature type counts from {} to {}'.format(min(d['Count']),
max(d['Count'])))
def _plot_contig_lengths(self):
vals = pd.Series(self.assembly['contig_length'].values(),
name='Sequence length (bp)')
ax = sns.distplot(vals)
ax.set_title('Contig lengths from {} to {}'.format(
vals.min(), vals.max()))
def _plot_contig_gc(self):
gc = self.assembly['contig_gc_content'].values()
gcp, ctg = 'GC percent', 'Contigs'
d = pd.DataFrame({gcp: [x*100.0 for x in sorted(gc)],
ctg: range(1, len(gc) + 1)})
ax = sns.factorplot(x=gcp, y=ctg, data=d)
#ax.set_title("Contig {} from {.2f} to {.2f}"
# .format(gcp, min(gc), max(gc)))
return ax
def _repr_html_(self):
self.summary_plots()
classf = Classification(self.taxon).classification
return self.render(classification=classf, taxon=self.taxon)
|
mit
|
stuart-knock/bokeh
|
bokeh/charts/builder/tests/test_horizon_builder.py
|
33
|
3440
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import datetime
import unittest
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Horizon
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHorizon(unittest.TestCase):
def test_supported_input(self):
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(6)]
xyvalues = OrderedDict({'Date': dts})
        # Repeat the starting and trailing points in order to close the
        # folded patches at the series boundaries
xyvalues['python'] = [-120, -120, -30, 50, 100, 103]
xyvalues['pypy'] = [-75, -75, -33, 15, 126, 126]
xyvaluesdf = pd.DataFrame(xyvalues)
groups = ['python', 'pypy']
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
ts = create_chart(Horizon, _xy, index='Date')
builder = ts._builders[0]
padded_date = [x for x in _xy['Date']]
padded_date.insert(0, padded_date[0])
padded_date.append(padded_date[-1])
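            # The largest absolute series value is 126, so with num_folds == 3
            # each fold is 126 / 3 == 42 units tall and the group labels are
            # the signed fold boundaries (+/-42, +/-84, +/-126).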
self.assertEqual(builder.num_folds, 3)
self.assertEqual(builder._series, groups)
self.assertEqual(builder._fold_height, 126.0 / 3)
self.assertEqual(builder._groups, ['42.0', '-42.0', '84.0', '-84.0', '126.0', '-126.0'])
assert_array_equal(builder._data['x_python'], padded_date)
assert_array_equal(builder._data['x_pypy'], padded_date)
            assert_array_equal(builder._data['y_fold-3_python'], [63, 9, 9, 63, 63, 63, 63, 63])
assert_array_equal(builder._data['y_fold-2_python'], [63, 0, 0, 63, 63, 63, 63, 63])
assert_array_equal(builder._data['y_fold-1_python'], [63, 0, 0, 18, 63, 63, 63, 63])
assert_array_equal(builder._data['y_fold1_python'], [0, 0, 0, 0, 63, 63, 63, 0])
assert_array_equal(builder._data['y_fold2_python'], [0, 0, 0, 0, 12, 63, 63, 0])
assert_array_equal(builder._data['y_fold3_python'], [0, 0, 0, 0, 0, 24, 28.5, 0])
assert_array_equal(builder._data['y_fold-3_pypy'], [126, 126, 126, 126, 126, 126, 126, 126])
assert_array_equal(builder._data['y_fold-2_pypy'], [126, 76.5, 76.5, 126, 126, 126, 126, 126])
assert_array_equal(builder._data['y_fold-1_pypy'], [126, 63, 63, 76.5, 126, 126, 126, 126])
assert_array_equal(builder._data['y_fold1_pypy'], [63, 63, 63, 63, 85.5, 126, 126, 63])
assert_array_equal(builder._data['y_fold2_pypy'], [63, 63, 63, 63, 63, 126, 126, 63])
assert_array_equal(builder._data['y_fold3_pypy'], [63, 63, 63, 63, 63, 126, 126, 63])
|
bsd-3-clause
|
huzq/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
15
|
6512
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from scipy.sparse import rand as sparse_rand
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
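    # With n_neighbors = Npts - 1 every point is a neighbor of every other
    # point, so graph (geodesic) distances equal Euclidean distances and the
    # 2D embedding should reproduce them exactly.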
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
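    # -0.5 * G**2 followed by KernelCenterer is the classical MDS double
    # centering step: it turns squared distances into a centered Gram (kernel)
    # matrix, which is compared against clf.reconstruction_error() below.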
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components=n_components, n_neighbors=2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert .9 < clf.score(X, y)
def test_pipeline_with_nearest_neighbors_transformer():
# Test chaining NearestNeighborsTransformer and Isomap with
# neighbors_algorithm='precomputed'
algorithm = 'auto'
n_neighbors = 10
X, _ = datasets.make_blobs(random_state=0)
X2, _ = datasets.make_blobs(random_state=1)
# compare the chained version and the compact version
est_chain = pipeline.make_pipeline(
neighbors.KNeighborsTransformer(
n_neighbors=n_neighbors, algorithm=algorithm, mode='distance'),
manifold.Isomap(n_neighbors=n_neighbors, metric='precomputed'))
est_compact = manifold.Isomap(n_neighbors=n_neighbors,
neighbors_algorithm=algorithm)
Xt_chain = est_chain.fit_transform(X)
Xt_compact = est_compact.fit_transform(X)
assert_array_almost_equal(Xt_chain, Xt_compact)
Xt_chain = est_chain.transform(X2)
Xt_compact = est_compact.transform(X2)
assert_array_almost_equal(Xt_chain, Xt_compact)
def test_different_metric():
# Test that the metric parameters work correctly, and default to euclidean
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
# metric, p, is_euclidean
metrics = [('euclidean', 2, True),
('manhattan', 1, False),
('minkowski', 1, False),
('minkowski', 2, True),
(custom_metric, 2, False)]
X, _ = datasets.make_blobs(random_state=0)
reference = manifold.Isomap().fit_transform(X)
for metric, p, is_euclidean in metrics:
embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X)
if is_euclidean:
assert_array_almost_equal(embedding, reference)
else:
with pytest.raises(AssertionError, match='not almost equal'):
assert_array_almost_equal(embedding, reference)
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert (model.nbrs_.n_neighbors ==
n_neighbors)
def test_sparse_input():
X = sparse_rand(100, 3, density=0.1, format='csr')
# Should not error
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
|
bsd-3-clause
|
datapythonista/pandas
|
pandas/tests/frame/methods/test_get_numeric_data.py
|
4
|
3198
|
import numpy as np
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestGetNumericData:
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
obj = DataFrame({"A": [1, "2", 3.0]})
result = obj._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
tm.assert_frame_equal(result, expected)
def test_get_numeric_data(self):
datetime64name = np.dtype("M8[ns]").name
objectname = np.dtype(np.object_).name
df = DataFrame(
{"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
index=np.arange(10),
)
result = df.dtypes
expected = Series(
[
np.dtype("float64"),
np.dtype("int64"),
np.dtype(objectname),
np.dtype(datetime64name),
],
index=["a", "b", "c", "f"],
)
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"d": np.array([1.0] * 10, dtype="float32"),
"e": np.array([1] * 10, dtype="int32"),
"f": np.array([1] * 10, dtype="int16"),
"g": Timestamp("20010102"),
},
index=np.arange(10),
)
result = df._get_numeric_data()
expected = df.loc[:, ["a", "b", "d", "e", "f"]]
tm.assert_frame_equal(result, expected)
only_obj = df.loc[:, ["c", "g"]]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
tm.assert_frame_equal(result, expected)
df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
tm.assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
tm.assert_frame_equal(result, expected)
def test_get_numeric_data_mixed_dtype(self):
# numeric and object columns
df = DataFrame(
{
"a": [1, 2, 3],
"b": [True, False, True],
"c": ["foo", "bar", "baz"],
"d": [None, None, None],
"e": [3.14, 0.577, 2.773],
}
)
result = df._get_numeric_data()
tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))
def test_get_numeric_data_extension_dtype(self):
# GH#22290
df = DataFrame(
{
"A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
"B": Categorical(list("abcabc")),
"C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
"D": IntervalArray.from_breaks(range(7)),
}
)
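        # Only the nullable integer columns "A" (Int64) and "C" (UInt8) are
        # numeric; the Categorical and IntervalArray columns are dropped.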
result = df._get_numeric_data()
expected = df.loc[:, ["A", "C"]]
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
lgarren/spack
|
var/spack/repos/builtin/packages/py-elephant/package.py
|
3
|
2338
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyElephant(PythonPackage):
"""Elephant is a package for analysis of electrophysiology data in Python
"""
homepage = "http://neuralensemble.org/elephant"
url = "https://pypi.io/packages/source/e/elephant/elephant-0.3.0.tar.gz"
version('0.3.0', '84e69e6628fd617af469780c30d2da6c')
variant('docs', default=False, description='Build the documentation')
variant('pandas', default=True, description='Build with pandas')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run')) # > 0.3.3 ?
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='+pandas')
depends_on('[email protected]:', type=('build', 'run'), when='+docs')
depends_on('[email protected]:', type=('build', 'run'), when='+docs')
# depends_on('[email protected]:', type=('build', 'run')) # tests
|
lgpl-2.1
|
darionyaphet/spark
|
python/pyspark/sql/tests/test_pandas_udf_window.py
|
21
|
12850
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import array, explode, col, lit, mean, min, max, rank, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
from pandas.util.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
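        # 10 ids, each exploded into 20 'v' values (id + 20.0 ... id + 29.0),
        # plus a constant weight column 'w' == 1.0.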
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_count_udf(self):
@pandas_udf('long', PandasUDFType.GROUPED_AGG)
def count(v):
return len(v)
return count
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).orderBy('v')
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
@property
def sliding_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, 1)
@property
def sliding_range_window(self):
return Window.partitionBy('id').orderBy('v').rangeBetween(-2, 4)
@property
def growing_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(Window.unboundedPreceding, 3)
@property
def growing_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(Window.unboundedPreceding, 4)
@property
def shrinking_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, Window.unboundedFollowing)
@property
def shrinking_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(-3, Window.unboundedFollowing)
def test_simple(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
w = self.unbounded_window
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
def test_bounded_simple(self):
from pyspark.sql.functions import mean, max, min, count
df = self.data
w1 = self.sliding_row_window
w2 = self.shrinking_range_window
plus_one = self.python_plus_one
count_udf = self.pandas_agg_count_udf
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('mean_v', mean_udf(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count_udf(df['v']).over(w2)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('min_v', min_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count(df['v']).over(w2)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('min_v', min(df['v']).over(w1))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_growing_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.growing_row_window
w2 = self.growing_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_sliding_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.sliding_row_window
w2 = self.sliding_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_shrinking_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.shrinking_row_window
w2 = self.shrinking_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_bounded_mixed(self):
from pyspark.sql.functions import mean, max
df = self.data
w1 = self.sliding_row_window
w2 = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w1)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w1)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean(df['v']).over(w1))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_window import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
nirdizati/nirdizati-runtime
|
PredictiveMethods/CaseOutcome/test_all_cases.py
|
1
|
3309
|
"""
Copyright (c) 2016-2017 The Nirdizati Project.
This file is part of "Nirdizati".
"Nirdizati" is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
"Nirdizati" is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this program.
If not, see <http://www.gnu.org/licenses/lgpl.html>.
"""
from PredictiveMonitor import PredictiveMonitor
import pandas as pd
import sys
import cPickle
from tqdm import tqdm, tqdm_pandas
if len(sys.argv) != 4:
sys.exit("Usage: python test_all_cases.py csv-test-file dataset-id label-column-id \n"
"Example:python test_all_cases.py test_bpi17.csv bpi17 label")
testSet = sys.argv[1]
dataset = sys.argv[2]
label_col = sys.argv[3]
dataset_params = pd.read_json("../data/dataset_params.json", orient="index", typ="series")
test = pd.read_csv('%s' % testSet)
case_id_col = dataset_params[dataset][u'case_id_col']
event_nr_col = dataset_params[dataset][u'event_nr_col']
pos_label = dataset_params[dataset][u'CaseOutcome'][label_col][u'pos_label']
static_cols = dataset_params[dataset][u'CaseOutcome'][u'static_cols']
dynamic_cols = dataset_params[dataset][u'CaseOutcome'][u'dynamic_cols']
cat_cols = dataset_params[dataset][u'CaseOutcome'][u'cat_cols']
encoder_kwargs = {"event_nr_col": event_nr_col, "static_cols": static_cols, "dynamic_cols": dynamic_cols,
"cat_cols": cat_cols, "oversample_fit": False, "minority_label": "true", "fillna": True,
"random_state": 22}
cls_method = dataset_params[dataset][u'CaseOutcome'][label_col][u'cls_method']
if cls_method == "rf":
cls_kwargs = {"n_estimators": dataset_params[dataset][u'CaseOutcome'][label_col][u'n_estimators'],
"max_features": dataset_params[dataset][u'CaseOutcome'][label_col][u'max_features'],
"random_state": 22}
elif cls_method == "gbm":
cls_kwargs = {"n_estimators": dataset_params[dataset][u'CaseOutcome'][label_col][u'n_estimators'],
"learning_rate": dataset_params[dataset][u'CaseOutcome'][label_col][u'learning_rate'],
"random_state": 22}
else:
    sys.exit("Classifier method not known")
predictive_monitor = PredictiveMonitor(event_nr_col=event_nr_col, case_id_col=case_id_col,
label_col=label_col, pos_label=pos_label,
cls_method=cls_method, encoder_kwargs=encoder_kwargs, cls_kwargs=cls_kwargs)
with open('../pkl/predictive_monitor_%s_%s.pkl' % (dataset,label_col), 'rb') as f:
predictive_monitor.models = cPickle.load(f)
nr_unique_cases = len(test.groupby(case_id_col).nunique()) + 1
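# tqdm_pandas registers a progress bar so that the groupby(...).progress_apply
# call below reports per-case progress while scoring the test set.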
tqdm_pandas(tqdm(range(1, nr_unique_cases)))
results = test.groupby(case_id_col).progress_apply(predictive_monitor.test)
res = pd.DataFrame({case_id_col:results.index, '%s'%label_col:results.values})
res.to_csv("results_%s_%s.csv"%(dataset,label_col), index=False)
|
lgpl-3.0
|
robin-lai/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
286
|
4353
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
    When fixed_n_classes is not None the first labeling is considered a ground
    truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
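# For example, with score_func=metrics.adjusted_rand_score the returned
# (len(n_clusters_range), n_runs) array stays centered around 0.0, while
# non-adjusted scores drift upward as n_clusters grows (see the plots below).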
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
bsd-3-clause
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/datasets/template_data.py
|
3
|
1650
|
#! /usr/bin/env python
"""Name of dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """E.g., This is public domain."""
TITLE = """Title of the dataset"""
SOURCE = """
This section should provide a link to the original dataset if possible and
attribution and correspondence information for the dataset's original author
if so desired.
"""
DESCRSHORT = """A short description."""
DESCRLONG = """A longer description of the dataset."""
#suggested notes
NOTE = """
Number of observations:
Number of variables:
Variable name definitions:
Any other useful information that does not fit into the above categories.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/DatasetName.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
|
apache-2.0
|
Eigenstate/msmbuilder
|
msmbuilder/tests/test_utils.py
|
10
|
2830
|
from __future__ import division
import numpy as np
import sklearn.pipeline
from mdtraj.testing import eq
from sklearn.externals.joblib import dump as jl_dump
from msmbuilder.decomposition import tICA
from msmbuilder.utils import Subsampler, dump, load
from .test_commands import tempdir
random = np.random.RandomState(2)
def test_subsampler_lag1():
n_traj, n_samples, n_features = 3, 100, 7
lag_time = 1
X_all_0 = [random.normal(size=(n_samples, n_features))
for i in range(n_traj)]
q_0 = np.concatenate(X_all_0)
subsampler = Subsampler(lag_time=lag_time)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(q_0.shape, q_1.shape)
eq(q_0.mean(0), q_1.mean(0))
eq(q_0.std(0), q_1.std(0))
subsampler = Subsampler(lag_time=lag_time, sliding_window=False)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(q_0.shape, q_1.shape)
eq(q_0.mean(0), q_1.mean(0))
eq(q_0.std(0), q_1.std(0))
def test_subsampler_lag2():
n_traj, n_samples, n_features = 3, 100, 7
lag_time = 2
X_all_0 = [random.normal(size=(n_samples, n_features))
for i in range(n_traj)]
q_0 = np.concatenate(X_all_0)
subsampler = Subsampler(lag_time=lag_time)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(((n_samples - lag_time + 2) * n_traj, n_features), q_1.shape)
subsampler = Subsampler(lag_time=lag_time, sliding_window=False)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(((n_samples / lag_time) * n_traj, n_features), q_1.shape)
def test_subsampler_tica():
n_traj, n_samples, n_features = 1, 500, 4
lag_time = 2
X_all_0 = [random.normal(size=(n_samples, n_features))
for i in range(n_traj)]
tica_0 = tICA(lag_time=lag_time)
tica_0.fit(X_all_0)
subsampler = Subsampler(lag_time=lag_time)
tica_1 = tICA()
pipeline = sklearn.pipeline.Pipeline([
("subsampler", subsampler),
('tica', tica_1)
])
pipeline.fit(X_all_0)
eq(tica_0.n_features, tica_1.n_features) # Obviously true
eq(tica_0.n_observations_, tica_1.n_observations_)
# The eigenvalues should be the same. NOT the timescales,
# as tica_1 has timescales calculated in a different time unit
eq(tica_0.eigenvalues_, tica_1.eigenvalues_)
def test_dump_load():
data = dict(name="Fancy_name", arr=np.random.rand(10, 5))
with tempdir():
dump(data, 'filename')
data2 = load('filename')
eq(data, data2)
def test_load_legacy():
# Used to save joblib files
data = dict(name="Fancy_name", arr=np.random.rand(10, 5))
with tempdir():
jl_dump(data, 'filename', compress=1)
data2 = load('filename')
eq(data, data2)
|
lgpl-2.1
|
dmytroKarataiev/MachineLearning
|
capstone/daily_returns.py
|
1
|
2094
|
"""Compute daily returns."""
import os
import pandas as pd
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY': # drop dates SPY did not trade
df = df.dropna(subset=["SPY"])
return df
def plot_data(df, title="Stock prices", xlabel="Date", ylabel="Price"):
"""Plot stock prices with a custom title and meaningful axis labels."""
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
def compute_daily_returns(df):
"""Compute and return the daily return values."""
    # Note: the returned DataFrame must have the same number of rows as df
daily_returns = df.copy()
daily_returns[1:] = (df[1:] / df[:-1].values) - 1
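    # .values strips the index from the divisor so the division is done by
    # position (each row divided by the previous row); with label alignment
    # most daily returns would incorrectly come out as zero.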
daily_returns.ix[0, :] = 0
return daily_returns
def compute_daily_pandas(df):
"""Compute and return the daily return values."""
daily_returns = (df / df.shift(1)) - 1 # much easier with pandas
daily_returns.ix[0, :] = 0 # replace first row with zeroes
return daily_returns
def test_run():
# Read data
dates = pd.date_range('2012-07-01', '2012-07-31') # one month only
symbols = ['SPY','XOM']
df = get_data(symbols, dates)
plot_data(df)
# Compute daily returns
daily_returns = compute_daily_returns(df)
plot_data(daily_returns, title="Daily returns", ylabel="Daily returns")
if __name__ == "__main__":
test_run()
|
mit
|
behzadnouri/scipy
|
scipy/spatial/kdtree.py
|
23
|
37991
|
# Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
See Also
--------
cKDTree : Implementation of `KDTree` in Cython
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
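    Examples
    --------
    A minimal usage sketch with randomly generated points:
    >>> import numpy as np
    >>> from scipy.spatial import KDTree
    >>> rng = np.random.RandomState(0)
    >>> points = rng.rand(30, 3)
    >>> tree = KDTree(points, leafsize=5)
    >>> distances, indices = tree.query(points[0], k=3)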
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
d = np.argmax(maxes-mins)
maxval = maxes[d]
minval = mins[d]
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
data = data[:,d]
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
# _still_ zero? all must have the same value
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(d, split,
self.__build(idx[less_idx],lessmaxes,mins),
self.__build(idx[greater_idx],maxes,greatermins))
def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1/(1+eps)
else:
epsfac = 1/(1+eps)**p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound**p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data,x[np.newaxis,:],p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound*epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q,(min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound*epsfac:
heappush(q,(min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d,i) for (d,i) in neighbors])
else:
return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=object)
ii = np.empty(retshape,dtype=object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=float)
dd.fill(np.inf)
ii = np.empty(k,dtype=int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
def __query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + \
traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = zip(x.ravel(), y.ravel())
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
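    # A minimal usage sketch of `query_pairs`, assuming the `KDTree` class defined
    # in this module; the helper name `_example_query_pairs` is hypothetical.
    @staticmethod
    def _example_query_pairs():
        rng = np.random.RandomState(0)
        tree = KDTree(rng.uniform(size=(30, 2)))
        # Unordered pairs (i, j) with i < j whose points lie within 0.1 of each other
        pairs = tree.query_pairs(r=0.1)
        assert all(i < j for i, j in pairs)
        return pairs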
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
        Count the number of pairs (x1,x2) that can be formed, with x1 drawn
from self and x2 drawn from `other`, and where
``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2,p)
max_r = rect1.max_distance_rectangle(rect2,p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children*node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1,KDTree.leafnode):
if isinstance(node2,KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
other.data[node2.idx][np.newaxis,:,:],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds,r[idx],side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2,KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2,idx)
traverse(node1.less,less1,node2.greater,greater2,idx)
traverse(node1.greater,greater1,node2.less,less2,idx)
traverse(node1.greater,greater1,node2.greater,greater2,idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, optional
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
"""
result = scipy.sparse.dok_matrix((self.n,other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i],other.data[j],p)
if d <= max_distance:
result[i,j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1,rect1,node2.less,less)
traverse(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less,less,node2,rect2)
traverse(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2)
traverse(node1.less,less1,node2.greater,greater2)
traverse(node1.greater,greater1,node2.less,less2)
traverse(node1.greater,greater1,node2.greater,greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
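# A minimal usage sketch of `KDTree.sparse_distance_matrix`; the helper name
# `_example_sparse_distance_matrix` is hypothetical and not part of the scipy API.
def _example_sparse_distance_matrix():
    rng = np.random.RandomState(0)
    tree_a = KDTree(rng.uniform(size=(20, 2)))
    tree_b = KDTree(rng.uniform(size=(25, 2)))
    # Pairs farther apart than max_distance are pruned and stay implicitly zero
    sdm = tree_a.sparse_distance_matrix(tree_b, max_distance=0.3)
    dense = sdm.toarray()   # (20, 25) array; zeros mark pruned pairs
    return sdm, dense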
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
        Matrix of M vectors in K dimensions.
y : (N, K) array_like
        Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
        If ``M * N * K`` > `threshold`, the algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Distance matrix.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1. , 1.41421356],
[ 1.41421356, 1. ]])
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
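# A minimal usage sketch of `distance_matrix`, contrasting the vectorised path with
# the loop fallback forced by a tiny `threshold`; the helper name
# `_example_distance_matrix` is hypothetical.
def _example_distance_matrix():
    x = np.array([[0.0, 0.0], [0.0, 1.0]])
    y = np.array([[1.0, 0.0], [1.0, 1.0]])
    full = distance_matrix(x, y)                  # vectorised: M*N*K is small
    looped = distance_matrix(x, y, threshold=1)   # forces the row-by-row loop
    assert np.allclose(full, looped)
    return full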
| bsd-3-clause | tengpeng/spark | python/pyspark/sql/udf.py | 16 | 17796 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-defined function related classes and functions
"""
import functools
import sys
from pyspark import SparkContext, since
from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType, ignore_unicode_prefix
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.types import StringType, DataType, StructType, _parse_datatype_string,\
to_arrow_type, to_arrow_schema
from pyspark.util import _get_argspec
__all__ = ["UDFRegistration"]
def _wrap_function(sc, func, returnType):
command = (func, returnType)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
def _create_udf(f, returnType, evalType):
if evalType in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF):
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
argspec = _get_argspec(f)
if evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF and len(argspec.args) == 0 and \
argspec.varargs is None:
raise ValueError(
"Invalid function: 0-arg pandas_udfs are not supported. "
"Instead, create a 1-arg pandas_udf and ignore the arg in your function."
)
if evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF \
and len(argspec.args) not in (1, 2):
raise ValueError(
"Invalid function: pandas_udfs with function type GROUPED_MAP "
"must take either one argument (data) or two arguments (key, data).")
# Set the name of the UserDefinedFunction object to be the name of function f
udf_obj = UserDefinedFunction(
f, returnType=returnType, name=None, evalType=evalType, deterministic=True)
return udf_obj._wrapped()
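# A minimal usage sketch of how a plain Python function typically becomes a wrapped
# UDF; the public `pyspark.sql.functions.udf` helper builds on `_create_udf` above.
# The helper name `_example_create_udf` is hypothetical.
def _example_create_udf():
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType

    # Row-at-a-time UDF returning an int; nothing runs until the UDF is applied
    # to DataFrame columns.
    length_udf = udf(lambda s: len(s) if s is not None else None, IntegerType())
    return length_udf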
class UserDefinedFunction(object):
"""
User defined function in Python
.. versionadded:: 1.3
"""
def __init__(self, func,
returnType=StringType(),
name=None,
evalType=PythonEvalType.SQL_BATCHED_UDF,
deterministic=True):
if not callable(func):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): "
"{0}".format(type(func)))
if not isinstance(returnType, (DataType, str)):
raise TypeError(
"Invalid returnType: returnType should be DataType or str "
"but is {}".format(returnType))
if not isinstance(evalType, int):
raise TypeError(
"Invalid evalType: evalType should be an int but is {}".format(evalType))
self.func = func
self._returnType = returnType
# Stores UserDefinedPythonFunctions jobj, once initialized
self._returnType_placeholder = None
self._judf_placeholder = None
self._name = name or (
func.__name__ if hasattr(func, '__name__')
else func.__class__.__name__)
self.evalType = evalType
self.deterministic = deterministic
@property
def returnType(self):
# This makes sure this is called after SparkContext is initialized.
# ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string.
if self._returnType_placeholder is None:
if isinstance(self._returnType, DataType):
self._returnType_placeholder = self._returnType
else:
self._returnType_placeholder = _parse_datatype_string(self._returnType)
if self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with scalar Pandas UDFs: %s is "
"not supported" % str(self._returnType_placeholder))
elif self.evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_schema(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with grouped map Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid returnType for grouped map Pandas "
"UDFs: returnType must be a StructType.")
elif self.evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with grouped aggregate Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
return self._returnType_placeholder
@property
def _judf(self):
        # It is possible for concurrent access to a newly created UDF
        # to initialize multiple UserDefinedPythonFunctions.
# This is unlikely, doesn't affect correctness,
# and should have a minimal performance impact.
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf()
return self._judf_placeholder
def _create_judf(self):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
wrapped_func = _wrap_function(sc, self.func, self.returnType)
jdt = spark._jsparkSession.parseDataType(self.returnType.json())
judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction(
self._name, wrapped_func, jdt, self.evalType, self.deterministic)
return judf
def __call__(self, *cols):
judf = self._judf
sc = SparkContext._active_spark_context
return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
# This function is for improving the online help system in the interactive interpreter.
# For example, the built-in help / pydoc.help. It wraps the UDF with the docstring and
# argument annotation. (See: SPARK-19161)
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
        # It is possible for a callable instance without a __name__ and/or
        # __module__ attribute to be wrapped here, e.g. functools.partial. In that
        # case we should avoid copying those attributes from the wrapped function
        # to the wrapper, so we remove them from the default set of names to assign
        # and then set them manually after wrapping.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper
def asNondeterministic(self):
"""
Updates UserDefinedFunction to nondeterministic.
.. versionadded:: 2.3
"""
# Here, we explicitly clean the cache to create a JVM UDF instance
# with 'deterministic' updated. See SPARK-23233.
self._judf_placeholder = None
self.deterministic = False
return self
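# A minimal usage sketch of UserDefinedFunction in action: applying a UDF to
# DataFrame columns via __call__ and flagging one as nondeterministic. The helper
# name `_example_apply_udf` is hypothetical and assumes a local SparkSession can
# be created.
def _example_apply_udf():
    import random
    from pyspark.sql import SparkSession
    from pyspark.sql.functions import udf
    from pyspark.sql.types import IntegerType

    spark = SparkSession.builder.master("local[1]").appName("udf-demo").getOrCreate()
    df = spark.createDataFrame([("Alice",), ("Bob",)], ["name"])

    strlen = udf(lambda s: len(s), IntegerType())
    df.select(strlen(df["name"]).alias("name_len")).show()

    # asNondeterministic() clears the cached JVM UDF and re-creates it with
    # deterministic=False (see SPARK-23233 above), so the optimizer does not
    # assume repeated evaluations give the same value.
    rand_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
    df.select(rand_udf().alias("rnd")).show()
    spark.stop()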
class UDFRegistration(object):
"""
Wrapper for user-defined function registration. This instance can be accessed by
:attr:`spark.udf` or :attr:`sqlContext.udf`.
.. versionadded:: 1.3.1
"""
def __init__(self, sparkSession):
self.sparkSession = sparkSession
@ignore_unicode_prefix
@since("1.3.1")
def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
:param returnType: the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:return: a user-defined function.
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see below.
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function:
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
.. note:: Registration for a user-defined function (case 2.) was added from
Spark 2.3.0.
"""
# This is to check whether the input function is from a user-defined function or
# Python function.
if hasattr(f, 'asNondeterministic'):
if returnType is not None:
raise TypeError(
"Invalid returnType: data type can not be specified when f is"
"a user-defined function, but got %s." % returnType)
if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF]:
raise ValueError(
"Invalid f: f must be either SQL_BATCHED_UDF or SQL_SCALAR_PANDAS_UDF")
register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name,
evalType=f.evalType,
deterministic=f.deterministic)
return_udf = f
else:
if returnType is None:
returnType = StringType()
register_udf = UserDefinedFunction(f, returnType=returnType, name=name,
evalType=PythonEvalType.SQL_BATCHED_UDF)
return_udf = register_udf._wrapped()
self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf)
return return_udf
@ignore_unicode_prefix
@since(2.3)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not specified, it is inferred via reflection.
:param name: name of the user-defined function
:param javaClassName: fully qualified name of java class
:param returnType: the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> spark.sql("SELECT javaStringLength('test')").collect()
[Row(UDF:javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
>>> spark.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF:javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
>>> spark.sql("SELECT javaStringLength3('test')").collect()
[Row(UDF:javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):
returnType = _parse_datatype_string(returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
@ignore_unicode_prefix
@since(2.3)
def registerJavaUDAF(self, name, javaClassName):
"""Register a Java user-defined aggregate function as a SQL function.
:param name: name of the user-defined aggregate function
:param javaClassName: fully qualified name of java class
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.udf
globs = pyspark.sql.udf.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.udf tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.udf, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | KasperPRasmussen/bokeh | examples/app/weather/main.py | 7 | 3317 |
from os.path import join, dirname
import numpy as np
import pandas as pd
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource, DataRange1d, Range1d, VBox, HBox, Select
from bokeh.palettes import Blues4
from bokeh.plotting import Figure
from scipy.signal import savgol_filter
STATISTICS = ['record_min_temp', 'actual_min_temp', 'average_min_temp', 'average_max_temp', 'actual_max_temp', 'record_max_temp']
# Filter for smoothing data originates from http://stackoverflow.com/questions/20618804/how-to-smooth-a-curve-in-the-right-way
def get_dataset(src, name, distribution):
df = src[src.airport == name].copy()
del df['airport']
df['date'] = pd.to_datetime(df.date)
df['left'] = df.date - pd.DateOffset(days=0.5)
df['right'] = df.date + pd.DateOffset(days=0.5)
df = df.set_index(['date'])
df.sort_index(inplace=True)
if distribution == 'Smooth':
window, order = 51, 3
for key in STATISTICS:
df[key] = savgol_filter(df[key], window, order)
return ColumnDataSource(data=df)
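# A minimal sketch of what the 'Smooth' branch above does, applied to a toy series
# with the same Savitzky-Golay settings (window=51, order=3). The name
# `_demo_smoothing` is hypothetical and the function is never called by the app.
def _demo_smoothing():
    noisy = np.sin(np.linspace(0, 4 * np.pi, 365)) + np.random.normal(0, 0.3, 365)
    smoothed = savgol_filter(noisy, window_length=51, polyorder=3)
    return noisy, smoothed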
def make_plot(source, title):
plot = Figure(x_axis_type="datetime", plot_width=1000, tools="", toolbar_location=None)
plot.title = title
colors = Blues4[0:3]
plot.quad(top='record_max_temp', bottom='record_min_temp', left='left', right='right', color=colors[2], source=source, legend="Record")
plot.quad(top='average_max_temp', bottom='average_min_temp', left='left', right='right', color=colors[1], source=source, legend="Average")
plot.quad(top='actual_max_temp', bottom='actual_min_temp', left='left', right='right', color=colors[0], alpha=0.5, line_color="black", source=source, legend="Actual")
# fixed attributes
plot.border_fill_color = "whitesmoke"
plot.xaxis.axis_label = None
plot.yaxis.axis_label = "Temperature (F)"
plot.axis.major_label_text_font_size = "8pt"
plot.axis.axis_label_text_font_size = "8pt"
plot.axis.axis_label_text_font_style = "bold"
plot.x_range = DataRange1d(range_padding=0.0, bounds=None)
plot.grid.grid_line_alpha = 0.3
plot.grid[0].ticker.desired_num_ticks = 12
return plot
# set up callbacks
def update_plot(attrname, old, new):
city = city_select.value
plot.title = cities[city]['title']
src = get_dataset(df, cities[city]['airport'], distribution_select.value)
    source.data.update(src.data)
# set up initial data
city = 'Austin'
distribution = 'Discrete'
cities = {
'Austin': {
'airport': 'AUS',
'title': 'Austin, TX',
},
'Boston': {
'airport': 'BOS',
'title': 'Boston, MA',
},
'Seattle': {
'airport': 'SEA',
'title': 'Seattle, WA',
}
}
city_select = Select(value=city, title='City', options=sorted(cities.keys()))
distribution_select = Select(value=distribution, title='Distribution', options=['Discrete', 'Smooth'])
df = pd.read_csv(join(dirname(__file__), 'data/2015_weather.csv'))
source = get_dataset(df, cities[city]['airport'], distribution)
plot = make_plot(source, cities[city]['title'])
city_select.on_change('value', update_plot)
distribution_select.on_change('value', update_plot)
controls = VBox(city_select, distribution_select)
# add to document
curdoc().add_root(HBox(controls, plot))
| bsd-3-clause | jjhelmus/scipy | scipy/interpolate/_fitpack_impl.py | 15 | 46563 |
#!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        A value of ``nest = m + k + 1`` is always sufficient.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
        raise TypeError('1 <= k = %d <= 5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
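# A minimal usage sketch of `splprep`: fit a parametric spline through a 3-D helix
# and evaluate it with `splev`. The helper name `_example_splprep` is hypothetical.
def _example_splprep():
    t_par = np.linspace(0, 4 * np.pi, 60)
    pts = [np.cos(t_par), np.sin(t_par), t_par]     # list of rank-1 coordinate arrays
    tck, u = splprep(pts, s=0)                      # interpolating parametric spline
    # Evaluate all three coordinates at 200 new parameter values in [0, 1]
    x_new, y_new, z_new = splev(np.linspace(0, 1, 200), tck)
    return x_new, y_new, z_new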
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally)
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine curfit from FITPACK.
    The user is responsible for assuring that the values of *x* are unique.
    Otherwise, *splrep* will not return sensible results.
    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
    # A spline returned by `splprep` stores one coefficient array per dimension,
    # so a nested first coefficient signals a parametric (N-dimensional) spline.
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
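# A minimal usage sketch of `splev` with a derivative order: away from the
# boundaries, the first derivative of a spline fitted to sin(x) tracks cos(x).
# The helper name `_example_splev_der` is hypothetical.
def _example_splev_der():
    x = np.linspace(0, 2 * np.pi, 50)
    tck = splrep(x, np.sin(x))
    x_fine = np.linspace(0, 2 * np.pi, 200)
    y = splev(x_fine, tck)           # spline values
    dy = splev(x_fine, tck, der=1)   # first derivative of the spline
    return y, dy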
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
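# A minimal usage sketch of `splint`: the definite integral over [0, pi] of a
# spline fitted to sin(x) is close to 2. The helper name `_example_splint` is
# hypothetical.
def _example_splint():
    x = np.linspace(0, np.pi, 100)
    tck = splrep(x, np.sin(x))
    return splint(0, np.pi, tck)   # approximately 2.0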
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
    except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_curfit_cache['tx'] = tx
_curfit_cache['ty'] = ty
_curfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
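    Examples
    --------
    A minimal illustrative sketch (the sample surface below is arbitrary; any
    smooth data fitted with `bisplrep` would do):
    >>> import numpy as np
    >>> x, y = np.mgrid[0:5, 0:5]
    >>> z = np.sin(x) + np.cos(y)
    >>> tck = bisplrep(x.ravel(), y.ravel(), z.ravel(), s=0.1)
    >>> znew = bisplev(np.arange(0, 4, 0.5), np.arange(0, 4, 0.5), tck)
    >>> znew.shape
    (8, 8)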
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
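    Examples
    --------
    Illustrative sketch only (``tck`` comes from `bisplrep`; the plane z = x + y
    is used so the exact integral over [0, 4] x [0, 4] is 64):
    >>> import numpy as np
    >>> x, y = np.mgrid[0:5, 0:5]
    >>> tck = bisplrep(x.ravel(), y.ravel(), (x + y).ravel(), s=0)
    >>> integ = dblint(0, 4, 0, 4, tck)  # should be close to 64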
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
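    Examples
    --------
    A small illustrative sketch (the data below is arbitrary); inserting a knot
    ``m`` times grows the knot vector by ``m`` entries:
    >>> import numpy as np
    >>> x = np.linspace(0, 10, 11)
    >>> tck = splrep(x, np.sin(x))
    >>> tck_new = insert(5.5, tck, m=2)
    >>> len(tck_new[0]) - len(tck[0])
    2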
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except:
parametric = False
if parametric:
cc = []
for c_vals in c:
            tt, cc_val, kk = insert(x, [t, c_vals, k], m, per)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
                # (and append trailing dims, if necessary)
dt = t[k+1:-1] - t[1:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + (None,)*len(c.shape[1:])
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
c = np.r_[np.zeros((1,) + c.shape[1:]),
c,
[c[-1]] * (k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
|
bsd-3-clause
|
cider-team-mercury/mercury
|
heat_flux/spherical_harmonics.py
|
1
|
2535
|
import numpy as np
from scipy.special import sph_harm
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from numpy.polynomial.legendre import leggauss
def real_spherical_harmonic( theta, phi, l, m):
assert( m <= l )
assert( l >= 0. )
if m==0:
val = sph_harm(m, l, phi, theta)
return val.real
elif m < 0:
val = 1.0j/np.sqrt(2.) * (sph_harm(m,l,phi, theta)
- np.power(-1., m ) * sph_harm(-m, l, phi, theta) )
return val.real
elif m > 0:
val = 1.0/np.sqrt(2.) * (sph_harm(-m,l,phi, theta)
+ np.power(-1., m ) * sph_harm(m, l, phi, theta) )
return val.real
def spherical_harmonic_transform( func, lmax ):
coeffs = []
glpoints, glweights = leggauss(lmax)
glpoints = np.arccos(glpoints)
fpoints = np.linspace(0.,np.pi*2., lmax+1, endpoint=False)
for l in range(lmax+1):
c = []
for m in range(2*l+1):
mp = m-l
val = 0.
for theta, weight in zip(glpoints, glweights):
for phi in fpoints:
val += weight*real_spherical_harmonic( theta, phi, l, mp)*func(theta,phi)
            if abs(val) < 1.e-13:
                val = 0.
c.append(val)
coeffs.append(c)
return coeffs
def plot_spherical_harmonic_expansion( coeffs, func = None):
res = 40
lats = np.linspace(0.,np.pi, res)
lons = np.linspace(0.,2.*np.pi, 2*res)
LONS, LATS = np.meshgrid(lons, lats)
    T = np.zeros_like(LATS)
T2 = np.empty_like(LATS)
for l,c in enumerate(coeffs):
assert(len(c) == 2*l+1)
for m,val in enumerate(c):
mp = m-l
for i,theta in enumerate(lats):
for j,phi in enumerate(lons):
T[i,j] += val*real_spherical_harmonic(theta, phi, l, mp)
                    if func is not None:
T2[i,j] = func(theta, phi)
map = Basemap( projection='hammer', lat_0=30., lon_0=0.)
x,y=map(180.-LONS*180./np.pi, 90.-LATS*180./np.pi)
minval = np.min(T)
maxval = np.max(T)
print "Minimum: ", minval, "Maximum: ", maxval, "Variation: ", (maxval-minval)/minval
    if func is None:
map.pcolor(x, y, T, cmap='Spectral_r')
cb1 = plt.colorbar()
plt.show()
else:
plt.subplot(121)
map.pcolor(x, y, T)
cb1 = plt.colorbar()
plt.subplot(122)
map.pcolor(x, y, T2)
cb2 = plt.colorbar()
plt.show()
|
gpl-2.0
|
sgenoud/scikit-learn
|
examples/linear_model/plot_sgd_penalties.py
|
7
|
1500
|
"""
==============
SGD: Penalties
==============
Plot the contours of the three penalties supported by
`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print __doc__
import numpy as np
import pylab as pl
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
pl.plot([-ext, ext], [0, 0], "k-")
pl.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would lead to division by zero in el()
cross(1.2)
pl.plot(xs, l1(xs), "r-", label="L1")
pl.plot(xs, -1.0 * l1(xs), "r-")
pl.plot(-1 * xs, l1(xs), "r-")
pl.plot(-1 * xs, -1.0 * l1(xs), "r-")
pl.plot(xs, l2(xs), "b-", label="L2")
pl.plot(xs, -1.0 * l2(xs), "b-")
pl.plot(-1 * xs, l2(xs), "b-")
pl.plot(-1 * xs, -1.0 * l2(xs), "b-")
pl.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
pl.plot(xs, -1.0 * el(xs, alpha), "y-")
pl.plot(-1 * xs, el(xs, alpha), "y-")
pl.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
pl.xlabel(r"$w_0$")
pl.ylabel(r"$w_1$")
pl.legend()
pl.axis("equal")
pl.show()
|
bsd-3-clause
|
davidmcclure/textplot
|
textplot/graphs.py
|
1
|
2306
|
import networkx as nx
import matplotlib.pyplot as plt
from abc import ABCMeta, abstractmethod
from clint.textui.progress import bar
class Graph(metaclass=ABCMeta):
def __init__(self):
"""
Initialize the graph.
"""
self.graph = nx.Graph()
@abstractmethod
def build(self):
pass
def draw_spring(self, **kwargs):
"""
Render a spring layout.
"""
nx.draw_spring(
self.graph,
with_labels=True,
font_size=10,
edge_color='#dddddd',
node_size=0,
**kwargs
)
plt.show()
def write_gml(self, path):
"""
Write a GML file.
Args:
path (str): The file path.
"""
nx.write_gml(self.graph, path)
def write_graphml(self, path):
"""
Write a GraphML file.
Args:
path (str): The file path.
"""
nx.write_graphml(self.graph, path)
class Skimmer(Graph):
def build(self, text, matrix, skim_depth=10, d_weights=False):
"""
1. For each term in the passed matrix, score its KDE similarity with
all other indexed terms.
2. With the ordered stack of similarities in hand, skim off the top X
pairs and add them as edges.
Args:
text (Text): The source text instance.
matrix (Matrix): An indexed term matrix.
skim_depth (int): The number of siblings for each term.
d_weights (bool): If true, give "close" words low edge weights.
"""
for anchor in bar(matrix.keys):
n1 = text.unstem(anchor)
# Heaviest pair scores:
pairs = matrix.anchored_pairs(anchor).items()
for term, weight in list(pairs)[:skim_depth]:
# If edges represent distance, use the complement of the raw
# score, so that similar words are connected by "short" edges.
if d_weights: weight = 1-weight
n2 = text.unstem(term)
# NetworkX does not handle numpy types when writing graphml,
# so we cast the weight to a regular float.
self.graph.add_edge(n1, n2, weight=float(weight))
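# Illustrative usage sketch (not part of the original module): `text` and `matrix`
# stand in for the Text and Matrix instances referenced in the docstring above,
# built elsewhere in textplot.
#
#     g = Skimmer()
#     g.build(text, matrix, skim_depth=10)
#     g.write_gml('graph.gml')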
|
mit
|
jhlch/sparklingpandas
|
sparklingpandas/pstatcounter.py
|
4
|
5444
|
"""
This module provides statistics for L{PRDD}s.
Look at the stats() method on PRDD for more info.
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
from pyspark.statcounter import StatCounter
import scipy.stats as scistats
import numpy as np
class PStatCounter(object):
"""
A wrapper around StatCounter which collects stats for multiple columns
"""
def __init__(self, dataframes, columns):
"""
Creates a stats counter for the provided DataFrames
computing the stats for all of the columns in columns.
Parameters
----------
dataframes: list of dataframes, containing the values to compute stats
on.
columns: list of strs, list of columns to compute the stats on.
"""
assert (not isinstance(columns, basestring)), "columns should be a " \
"list of strs, " \
"not a str!"
assert isinstance(columns, list), "columns should be a list!"
self._columns = columns
self._counters = dict((column, StatCounter()) for column in columns)
for df in dataframes:
self.merge(df)
def merge(self, frame):
"""
Add another DataFrame to the PStatCounter.
"""
for column, values in frame.iteritems():
# Temporary hack, fix later
counter = self._counters.get(column)
for value in values:
if counter is not None:
counter.merge(value)
def merge_pstats(self, other):
"""
Merge all of the stats counters of the other PStatCounter with our
counters.
"""
if not isinstance(other, PStatCounter):
raise Exception("Can only merge PStatcounters!")
for column, counter in self._counters.items():
other_counter = other._counters.get(column)
self._counters[column] = counter.mergeStats(other_counter)
return self
def __str__(self):
formatted_str = ""
for column, counter in self._counters.items():
formatted_str += "(field: %s, counters: %s)" % (column, counter)
return formatted_str
def __repr__(self):
return self.__str__()
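# Minimal usage sketch (illustrative only; assumes pandas is importable and the
# DataFrames contain the named numeric columns):
#
#     import pandas as pd
#     df = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
#     counters = PStatCounter([df], columns=['a', 'b'])
#     print counters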
class ColumnStatCounters(object):
"""
A wrapper around StatCounter which collects stats for multiple columns
"""
def __init__(self, dataframes=None, columns=None):
"""
Creates a stats counter for the provided data frames
computing the stats for all of the columns in columns.
Parameters
----------
        dataframes: list of dataframes, containing the values to compute stats
            on.
        columns: list of strs, list of columns to compute the stats on.
"""
self._column_stats = dict((column_name, StatCounter()) for
column_name in columns)
for single_df in dataframes:
self.merge(single_df)
def merge(self, frame):
"""
Add another DataFrame to the accumulated stats for each column.
Parameters
----------
frame: pandas DataFrame we will update our stats counter with.
"""
for column_name, _ in self._column_stats.items():
data_arr = frame[[column_name]].values
count, min_max_tup, mean, _, _, _ = \
scistats.describe(data_arr)
stats_counter = StatCounter()
stats_counter.n = count
stats_counter.mu = mean
stats_counter.m2 = np.sum((data_arr - mean) ** 2)
stats_counter.minValue, stats_counter.maxValue = min_max_tup
self._column_stats[column_name] = self._column_stats[
column_name].mergeStats(stats_counter)
return self
def merge_stats(self, other_col_counters):
"""
Merge statistics from a different column stats counter in to this one.
Parameters
----------
        other_col_counters: Other ColumnStatCounters to merge into this one.
"""
for column_name, _ in self._column_stats.items():
self._column_stats[column_name] = self._column_stats[column_name] \
.mergeStats(other_col_counters._column_stats[column_name])
return self
def __str__(self):
formatted_str = ""
for column, counter in self._column_stats.items():
formatted_str += "(field: %s, counters: %s)" % (column, counter)
return formatted_str
def __repr__(self):
return self.__str__()
|
apache-2.0
|
raincoatrun/basemap
|
examples/maskoceans.py
|
4
|
1922
|
from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
|
gpl-2.0
|
usc-isi-i2/WEDC
|
wedc/domain/core/ml/classifier/label_propagation/labelprop.py
|
1
|
11209
|
import os
from wedc.domain.core.ml.helper import label
from wedc.domain.core.ml.labelprop import LabelProp
from wedc.domain.core.ml.graph.knn import KNNGraph
from wedc.domain.core.data.seed import seed_vector
#######################################################
# Run Label Propagation
#######################################################
def run(data, labelled_data, n_neighbors=10, iter=100, eps=0.00001):
pid = 1
mapping = {} # from pid to sid
X = []
y = []
pids = []
labelled_pids = []
ans = {}
# load data
for i, kv in enumerate(data):
sid, vector = kv
vector = [float(_) for _ in vector.split()]
if max(vector) == 0:
ans[sid] = [1, 1]
else:
mapping[pid] = sid
pids.append(pid)
X.append(vector)
y.append(0)
pid = pid + 1
# load labelled data
for item in labelled_data:
vector = item[0]
label = item[1]
vector = [float(_) for _ in vector.split()]
if max(vector) == 0:
ans[sid] = [1, 1]
else:
pids.append(pid)
labelled_pids.append(pid)
X.append(vector)
y.append(label)
pid = pid + 1
graph_input = [[pids[_], X[_], y[_]] for _ in range(len(pids))]
graph = KNNGraph().build(graph_input, n_neighbors=n_neighbors)
labelprop = LabelProp()
labelprop.load_data_from_mem(graph)
rtn_lp = labelprop.run(eps, iter, clean_result=True)
rtn_lp = [_ for _ in rtn_lp if _[0] not in labelled_pids] # in order, asc
for preds in rtn_lp:
pid = int(preds[0])
if pid not in mapping:
continue
pred_label = preds[1]
score = preds[2]
ans[mapping[pid]] = [pred_label, score]
return ans
# def run_lp(input, output=None, iter=100, eps=0.00001):
# return lp.run_by_jar(input, output=output, iter=iter, eps=eps)
#######################################################
# Evaluation
#######################################################
def do_evaluation(output_path, num_of_tests=1, test_rate=.9, n_neighbors=10, max_iter=100, tol=0.00001):
import numpy as np
import shutil
from wedc.infrastructure.model.seed_dict import SeedDict
# load data and label
dataset = load_dataset()
# load seeds and generate post vector
# short posts will be removed when load post vectors
seeds = SeedDict.load_data()
dataset = load_post_vectors(dataset, seeds) # short post removed
# load file path
if os.path.isdir(output_path):
shutil.rmtree(output_path)
os.mkdir(output_path)
pid_set = [_[0] for _ in dataset]
random_seeds = np.random.randint(1, 10000000, size=num_of_tests)
# count = 0
ans = []
for i, random_seed in enumerate(random_seeds):
# prepare report env
round_path_ = os.path.join(output_path, 'round_' + str(i+1) + '_random_seed_' + str(random_seed))
os.mkdir(round_path_)
graph_path_ = os.path.join(round_path_, 'graph_knn.txt')
labelprop_path_ = os.path.join(round_path_, 'graph_lp.txt')
report_path_ = os.path.join(round_path_, 'report.txt')
# shuffle post id set to load random data
shuffled_pid_set = list(pid_set)
np.random.seed(random_seed)
np.random.shuffle(shuffled_pid_set)
total_size = len(shuffled_pid_set)
total_testing_items = int(total_size*test_rate)
total_training_items = total_size - total_testing_items
# print total_training_items, '+', total_testing_items, '=', total_size
training_pid_set = shuffled_pid_set[total_testing_items:]
testing_pid_set = shuffled_pid_set[:total_testing_items]
# item[0]: post id
# item[1]: label
# item[2]: vector
# training_set = [[_[0], _[1], _[3]] for _ in dataset if _[0] in training_pid_set]
# testing_set = [[_[0], _[1], _[3]] for _ in dataset if _[0] in testing_pid_set]
# # for X_train, vector formated in a string
training_data = [_[3] for _ in dataset if _[0] in training_pid_set]
# # for y_train, label in int
training_label = [_[1] for _ in dataset if _[0] in training_pid_set]
# # for X_test, vector formated in a string
testing_data = [_[3] for _ in dataset if _[0] in testing_pid_set]
# # for y_test, label in int
testing_label = [_[1] for _ in dataset if _[0] in testing_pid_set]
# prepare X, y for graph
# X = np.array(np.mat(';'.join([_[3] for _ in dataset]))) # in order, asc
# y = np.copy([_[1] for _ in dataset]) # in order, asc
# y[[pid_set.index(_) for _ in testing_pid_set]] = 0 # in order, asc
X = [[float(v) for v in _[3].split()] for _ in dataset] # in order, asc
y = [_[1] for _ in dataset] # in order, asc
for _ in testing_pid_set:
y[pid_set.index(_)] = 0
# item[0]: post id
# item[1]: vector in numpy
# item[2]: label in numpy, filling with 0 for testing data
graph_input = [[pid_set[_], X[_], y[_]] for _ in range(len(pid_set))]
# build knn graph
graph = KNNGraph().build(graph_input, output=graph_path_, n_neighbors=n_neighbors)
# graph = knn.build(graph_input, output=graph_path_, n_neighbors=n_neighbors)
# rtn_lp = run_lp(graph_path_, output=labelprop_path_, iter=max_iter, eps=tol)
labelprop = LabelProp()
labelprop.load_data_from_mem(graph)
rtn_lp = labelprop.run(tol, max_iter, clean_result=True)
# rtn_valid = []
# for line in rtn_lp:
# try:
# score = sum([float(_[1]) for _ in line[2:]])
# if score:
# rtn_valid.append([line[0], line[1], score])
# except Exception as e:
# raise Exception('r')
# rtn_lp = rtn_valid
valid_pid_set = [_[0] for _ in rtn_lp if _[0] in testing_pid_set] # in order, asc
y_predict = [_[1] for _ in rtn_lp if _[0] in valid_pid_set] # in order, asc
y_test = [_[1] for _ in dataset if _[0] in valid_pid_set] # in order, asc
# from sklearn.metrics import accuracy_score
# accuracy = accuracy_score(y_test, y_predict)
# if accuracy < 0.9:
# count += 1
info_data = [[training_pid_set, training_data, training_label], [testing_pid_set, testing_data, testing_label], [len(y), len(valid_pid_set)]]
label_data = [y_test, y_predict, valid_pid_set]
accuracy = generate_report(report_path_, i+1, random_seed, info_data, label_data)
ans.append([i+1, accuracy, info_data, label_data])
return ans
# print 1.*count/num_of_tests
def generate_report(report_path_, round_id, random_seed, info_data, label_data):
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
# info data
training_pid_set = info_data[0][0]
training_data = info_data[0][1]
training_label = info_data[0][2]
testing_pid_set = info_data[1][0]
testing_data = info_data[1][1]
testing_label = info_data[1][2]
    size_without_short_posts = info_data[2][0]
    size_valid_lp_pred = info_data[2][1]
# label data
# label_data = sorted(label_data, key=lambda x: x[2])
y_test = label_data[0]
y_predict = label_data[1]
valid_pid_set = label_data[2]
report = classification_report(y_test, y_predict)
accuracy = accuracy_score(y_test, y_predict)
# test only
"""
if accuracy < 0.8:
print 'round_id:', round_id, ', accuracy: ', accuracy
print valid_pid_set
print accuracy
"""
with open(report_path_, 'wb') as rf:
rf.write('+--------------------------------------------------------+\n')
rf.write('| Report |\n')
rf.write('+--------------------------------------------------------+\n\n')
        rf.write('total size of posts without short posts: ' + str(size_without_short_posts) + '\n')
rf.write('valid prediction from label propagation: ' + str(size_valid_lp_pred) + '\n')
rf.write('\n'+ report +'\n')
rf.write('accuracy: ' + str(accuracy) + '\n')
rf.write('y_test:\n')
rf.write(str(y_test) + '\n')
rf.write('y_predict:\n')
rf.write(str(y_predict) + '\n')
rf.write('y_test | y_pred | pid \n')
for i in range(len(y_test)):
rf.write(str(y_test[i]) + ' | ' + str(y_predict[i]) + ' | ' + str(valid_pid_set[i]) + '\n')
rf.write('\n\n\n\n')
rf.write('---- Training ----\n')
rf.write('size: '+str(len(training_pid_set))+'\n')
rf.write('post id set:\n'+str(training_pid_set)+'\n')
rf.write('training labels:\n'+str(training_label)+'\n')
rf.write('label | pid \n')
for i in range(len(training_pid_set)):
rf.write(str(training_label[i]) + ' | ' + str(training_pid_set[i]) + '\n')
rf.write('\n\n\n\n')
rf.write('---- Testing ----\n')
rf.write('size: '+str(len(testing_pid_set))+'\n')
rf.write('post id set:\n'+str(testing_pid_set)+'\n')
rf.write('testing labels:\n'+str(testing_label)+'\n')
rf.write('label | pid \n')
for i in range(len(testing_pid_set)):
rf.write(str(testing_label[i]) + ' | ' + str(testing_pid_set[i]) + '\n')
return accuracy
#######################################################
# Common
#######################################################
def load_dataset():
from wedc.infrastructure.model.labelled_data import LabelledData
# load dataset from database
labelled_dataset = LabelledData.load_data()
dataset = []
for idx, ld in enumerate(labelled_dataset):
# data[0]: post id
# data[1]: data label
# data[2]: data extraction
# data[3]: source id
data = [idx+1, int(ld.label), str(ld.extraction)]
dataset.append(data)
return dataset
def load_post_vectors(dataset, seeds):
# load extractions and generate post vectors
post_vectors = seed_vector.generate_post_vector([_[2] for _ in dataset], seeds)
# add post_id for post vectors
# data[0]: post id
# data[1]: post label
    # data[2]: post extraction
# data[3]: post vector
[dataset[i].extend([_]) for i, _ in enumerate(post_vectors)]
# refine dataset
# 1. less than 8 extraction words will be removed
# 2. extraction without any seed words will be removed
refined_dataset = []
ext_len_threshold = 8
for data in dataset:
# pid = data[0]
# label = data[1]
# extraction = data[2]
# vector = data[3]
extractions_count = len(data[2].split(' '))
vector_list = [float(_) for _ in data[3].strip().split(' ')]
if extractions_count < ext_len_threshold or max(vector_list) == 0:
continue
else:
refined_dataset.append(data)
return refined_dataset
|
apache-2.0
|
johndpope/tensorflow
|
tensorflow/examples/learn/multiple_gpu.py
|
49
|
3078
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
  """DNN with three hidden layers, and dropout of 0.5 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
  # Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.5.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
josemao/nilmtk
|
nilmtk/dataset_converters/iawe/convert_iawe.py
|
6
|
3735
|
from __future__ import print_function, division
import pandas as pd
import numpy as np
from os.path import join, isdir, isfile, dirname, abspath
from os import getcwd
from sys import getfilesystemencoding
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import check_directory_exists, get_datastore
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
from copy import deepcopy
def reindex_fill_na(df, idx):
df_copy = deepcopy(df)
df_copy = df_copy.reindex(idx)
power_columns = [
x for x in df.columns if x[0] in ['power']]
non_power_columns = [x for x in df.columns if x not in power_columns]
for power in power_columns:
df_copy[power].fillna(0, inplace=True)
for measurement in non_power_columns:
df_copy[measurement].fillna(
df[measurement].median(), inplace=True)
return df_copy
column_mapping = {
'frequency': ('frequency', ""),
'voltage': ('voltage', ""),
'W': ('power', 'active'),
'energy': ('energy', 'apparent'),
'A': ('current', ''),
'reactive_power': ('power', 'reactive'),
'apparent_power': ('power', 'apparent'),
'power_factor': ('pf', ''),
'PF': ('pf', ''),
'phase_angle': ('phi', ''),
'VA': ('power', 'apparent'),
'VAR': ('power', 'reactive'),
'VLN': ('voltage', ""),
'V': ('voltage', ""),
'f': ('frequency', "")
}
TIMESTAMP_COLUMN_NAME = "timestamp"
TIMEZONE = "Asia/Kolkata"
START_DATETIME, END_DATETIME = '7-13-2013', '8-4-2013'
FREQ = "1T"
def convert_iawe(iawe_path, output_filename, format="HDF"):
"""
Parameters
----------
iawe_path : str
The root path of the iawe dataset.
output_filename : str
The destination filename (including path and suffix).
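    Examples
    --------
    Hypothetical invocation (the paths are placeholders for a local copy of the
    dataset and the desired output file):
    >>> convert_iawe('/data/iawe', 'iawe.h5')  # doctest: +SKIP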
"""
check_directory_exists(iawe_path)
idx = pd.DatetimeIndex(start=START_DATETIME, end=END_DATETIME, freq=FREQ)
idx = idx.tz_localize('GMT').tz_convert(TIMEZONE)
# Open data store
store = get_datastore(output_filename, format, mode='w')
electricity_path = join(iawe_path, "electricity")
# Mains data
for chan in range(1, 12):
key = Key(building=1, meter=chan)
filename = join(electricity_path, "%d.csv" % chan)
print('Loading ', chan)
df = pd.read_csv(filename)
df.drop_duplicates(subset=["timestamp"], inplace=True)
df.index = pd.to_datetime(df.timestamp.values, unit='s', utc=True)
df = df.tz_convert(TIMEZONE)
df = df.drop(TIMESTAMP_COLUMN_NAME, 1)
df.rename(columns=lambda x: column_mapping[x], inplace=True)
df.columns.set_names(LEVEL_NAMES, inplace=True)
df = df.convert_objects(convert_numeric=True)
df = df.dropna()
df = df.astype(np.float32)
df = df.sort_index()
df = df.resample("1T")
df = reindex_fill_na(df, idx)
assert df.isnull().sum().sum() == 0
store.put(str(key), df)
store.close()
convert_yaml_to_hdf5(join(_get_module_directory(), 'metadata'),
output_filename)
print("Done converting iAWE to HDF5!")
def _get_module_directory():
# Taken from http://stackoverflow.com/a/6098238/732596
path_to_this_file = dirname(getfile(currentframe()))
if not isdir(path_to_this_file):
encoding = getfilesystemencoding()
path_to_this_file = dirname(unicode(__file__, encoding))
if not isdir(path_to_this_file):
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
if not isdir(path_to_this_file):
path_to_this_file = getcwd()
assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
return path_to_this_file
|
apache-2.0
|
CamDavidsonPilon/lifelines
|
lifelines/tests/test_npmle.py
|
1
|
3894
|
# -*- coding: utf-8 -*-
import pytest
from lifelines.fitters.npmle import npmle, is_subset, create_turnbull_intervals, interval, reconstruct_survival_function
from numpy import testing as npt
from lifelines.datasets import load_mice
from lifelines import KaplanMeierFitter
import numpy as np
import pandas as pd
def test_is_subset():
assert is_subset(interval(4, 4), interval(3, 7))
assert is_subset(interval(4, 4), interval(4, 7))
assert is_subset(interval(4, 7), interval(4, 7))
assert is_subset(interval(4, 7), interval(3.9, 7))
assert is_subset(interval(4, np.inf), interval(3, np.inf))
assert not is_subset(interval(4, 9), interval(3.9, 7))
assert not is_subset(interval(2, 9), interval(3.9, 7))
assert not is_subset(interval(4, 9), interval(5, 7))
def test_create_turnbull_intervals():
left, right = zip(*([0, 1], [4, 6], [2, 6], [0, 3], [2, 4], [5, 7]))
assert create_turnbull_intervals(left, right) == [interval(0, 1), interval(2, 3), interval(4, 4), interval(5, 6)]
left, right = zip(*[(0, 1)])
assert create_turnbull_intervals(left, right) == [interval(0, 1)]
left, right = zip(*[(-1, 1)])
assert create_turnbull_intervals(left, right) == [interval(-1, 1)]
left, right = zip(*[(-1, 1), (-2, 2)])
assert create_turnbull_intervals(left, right) == [interval(-1, 1)]
def test_npmle():
left, right = [1, 8, 8, 7, 7, 17, 37, 46, 46, 45], [7, 8, 10, 16, 14, np.inf, 44, np.inf, np.inf, np.inf]
npt.assert_allclose(npmle(left, right, verbose=True)[0], np.array([0.16667016, 0.33332984, 0.125, 0.375]), rtol=1e-4)
def test_npmle_with_weights_is_identical_if_uniform_weights():
left, right = [1, 8, 8, 7, 7, 17, 37, 46, 46, 45], [7, 8, 10, 16, 14, np.inf, 44, np.inf, np.inf, np.inf]
weights = 2 * np.ones_like(right)
    npt.assert_allclose(npmle(left, right, weights=weights, verbose=True)[0], np.array([0.16667016, 0.33332984, 0.125, 0.375]), rtol=1e-4)
def test_npmle_with_weights():
sol = np.array([0.2051282, 0.4102564, 0.0961539, 0.2884615])
left, right = [1, 8, 8, 7, 7, 17, 37, 46, 46, 45], [7, 8, 10, 16, 14, np.inf, 44, np.inf, np.inf, np.inf]
weights = np.array([2, 2, 2, 1, 1, 1, 1, 1, 1, 1])
npt.assert_allclose(npmle(left, right, weights=weights)[0], sol, rtol=1e-4)
left, right = [1, 1, 8, 8, 8, 8, 7, 7, 17, 37, 46, 46, 45], [7, 7, 8, 8, 10, 10, 16, 14, np.inf, 44, np.inf, np.inf, np.inf]
npt.assert_allclose(npmle(left, right)[0], sol, rtol=1e-4)
def test_sf_doesnt_return_nans():
left = [6, 7, 8, 7, 5]
right = [7, 8, 10, 16, 20]
results = npmle(left, right)
npt.assert_allclose(results[1], [interval(7, 7), interval(8, 8)])
npt.assert_allclose(results[0], [0.5, 0.5])
sf = reconstruct_survival_function(*results, timeline=[6, 7, 8, 16, 20])
assert not np.isnan(sf.values).any()
def test_mice_and_optimization_flag():
df = load_mice()
results = npmle(df["l"], df["u"], verbose=True, optimize=True)
npt.assert_allclose(results[0][0], 1 - 0.8571429, rtol=1e-4)
npt.assert_allclose(results[0][-1], 0.166667, rtol=1e-4)
def test_mice_scipy():
df = load_mice()
results = npmle(df["l"], df["u"], verbose=True, fit_method="scipy")
npt.assert_allclose(results[0][0], 1 - 0.8571429, rtol=1e-4)
npt.assert_allclose(results[0][-1], 0.166667, rtol=1e-4)
def test_max_lower_bound_less_than_min_upper_bound():
# https://github.com/CamDavidsonPilon/lifelines/issues/1151
import numpy as np
import pandas as pd
from lifelines import KaplanMeierFitter
# Data
np.random.seed(1)
left0 = np.random.normal(loc=60, scale=2, size=20)
add_time = np.random.normal(loc=100, scale=2, size=10)
right1 = left0[0:10] + add_time
right0 = right1.tolist() + [np.inf] * 10
# KaplanMeier
model = KaplanMeierFitter()
model.fit_interval_censoring(lower_bound=left0, upper_bound=right0)
|
mit
|
akloster/bokeh
|
bokeh/charts/builder/donut_builder.py
|
31
|
8206
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class which lets you build your Donut charts just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
        cat (list or bool, optional): list of strings representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
Donut chart in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
And finally add the needed glyphs (Wedges and AnnularWedges) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
    List of strings representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
Then build a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
        Takes reference points from data loaded at the ColumnDataSource.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
|
bsd-3-clause
|
Adai0808/scikit-learn
|
sklearn/externals/joblib/parallel.py
|
86
|
35087
|
"""
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
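# Worked example for the filter above (illustrative): with verbose=5 the rescaled
# verbosity is .5 * (11 - 5) ** 2 == 18, so index 0 is always reported and a later
# index is reported only when int(sqrt((index + 1) / 18)) increments, i.e. around
# indices 17, 71, 161, ...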
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
    """ Wraps a function so that any exception it raises is re-raised with the
    full traceback embedded in its representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
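# Illustrative sketch (not part of the original module): calling a wrapped failing
# function re-raises the error as a TransportableException embedding the traceback.
#
#     SafeFunction(lambda: 1 / 0)()    # raises TransportableException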
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
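    Examples
    --------
    Illustrative sketch of the captured (function, args, kwargs) triple:
    >>> def add(a, b):
    ...     return a + b
    >>> delayed(add)(1, 2)  # doctest: +SKIP
    (<function add at ...>, (1, 2), {})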
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stdout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the progress of the parallel execution, only a fraction
of the time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
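# --- Editor's addition: a minimal, hedged usage sketch (not part of joblib). -
# The docstring above describes batch_size and pre_dispatch in prose; the
# uncalled helper below shows one way to exercise both together. It assumes the
# module is importable as sklearn.externals.joblib, and the helper name is an
# illustrative choice, not an existing API.
def _example_batching_usage():
    from math import sqrt
    from sklearn.externals.joblib import Parallel, delayed
    # batch_size=4 groups four sqrt calls per dispatched batch, while
    # pre_dispatch='2 * n_jobs' consumes at most 4 tasks from the generator
    # ahead of the workers.
    return Parallel(n_jobs=2, batch_size=4, pre_dispatch='2 * n_jobs')(
        delayed(sqrt)(i) for i in range(16))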
|
bsd-3-clause
|
RomainBrault/scikit-learn
|
examples/ensemble/plot_isolation_forest.py
|
39
|
2361
|
"""
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produces shorter path lengths
for particular samples, they are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
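# --- Editor's addition: a hedged follow-up to the example above (not part of
# the original script). IsolationForest.predict returns +1 for inliers and -1
# for outliers, so the predictions computed above can also be summarised
# numerically; the variable names below are illustrative choices.
n_error_train = (y_pred_train == -1).sum()
n_error_test = (y_pred_test == -1).sum()
n_detected_outliers = (y_pred_outliers == -1).sum()
print("training points flagged as outliers: %d/%d" % (n_error_train, len(X_train)))
print("regular test points flagged as outliers: %d/%d" % (n_error_test, len(X_test)))
print("abnormal points flagged as outliers: %d/%d" % (n_detected_outliers, len(X_outliers)))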
|
bsd-3-clause
|
bbfamily/abu
|
python/c10.py
|
1
|
37241
|
# -*- encoding:utf-8 -*-
from __future__ import print_function
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# import warnings
# noinspection PyUnresolvedReferences
import abu_local_env
import abupy
from abupy import abu
from abupy import ABuSymbolPd
import sklearn.preprocessing as preprocessing
# warnings.filterwarnings('ignore')
sns.set_context(rc={'figure.figsize': (14, 7)})
# Use the sandbox data so that the data environment matches the one used in the book
abupy.env.enable_example_env_ipython()
"""
Chapter 10: Quantitative system -- machine learning, the pig trader (猪老三)
abu quant system on GitHub: https://github.com/bbfamily/abu (your star is my motivation!)
abu quant tutorial ipython notebooks: https://github.com/bbfamily/abu/tree/master/abupy_lecture
"""
"""
10.2 The quantitative environment in the pig trader's world
"""
"""
Whether to enable the date_week noise; enabling it lowers the classification accuracy so the results are closer to reality
"""
g_with_date_week_noise = False
def _gen_another_word_price(kl_another_word):
"""
Generate the stock's prices in the other (fictional) world
:param kl_another_word:
:return:
"""
for ind in np.arange(2, kl_another_word.shape[0]):
# data from the day before yesterday
bf_yesterday = kl_another_word.iloc[ind - 2]
# yesterday's data
yesterday = kl_another_word.iloc[ind - 1]
# today's data
today = kl_another_word.iloc[ind]
# generate today's closing price
kl_another_word.close[ind] = _gen_another_word_price_rule(
yesterday.close, yesterday.volume,
bf_yesterday.close, bf_yesterday.volume,
today.volume, today.date_week)
def _gen_another_word_price_rule(yesterday_close, yesterday_volume,
bf_yesterday_close,
bf_yesterday_volume,
today_volume, date_week):
"""
Build the other world's price model from the close/volume of the day before yesterday, yesterday's close/volume, and today's volume
"""
# price difference between yesterday's close and the close of the day before yesterday
price_change = yesterday_close - bf_yesterday_close
# volume difference between yesterday and the day before yesterday
volume_change = yesterday_volume - bf_yesterday_volume
# if volume and price moved in the same direction, today's price rises, otherwise it falls
# i.e. price and volume both up -> up, price and volume both down -> up, inconsistent -> down
sign = 1.0 if price_change * volume_change > 0 else -1.0
# use date_week to inject noise, otherwise the later classification would be 100% correct
if g_with_date_week_noise:
# noise on the sign; the noise only takes effect when today's volume is the largest of the three days
gen_noise = today_volume > np.max(
[yesterday_volume, bf_yesterday_volume])
# if today's volume is the largest of the three days and it is Friday, go down
if gen_noise and date_week == 4:
sign = -1.0
# if today's volume is the largest of the three days and it is Monday, go up
elif gen_noise and date_week == 0:
sign = 1.0
# the base of today's move is price_change (yesterday vs. the day before yesterday)
price_base = abs(price_change)
# the scaling factor of today's move is the volume ratio:
# the mean of today's volume / yesterday's volume and today's volume / the day-before-yesterday's volume
price_factor = np.mean([today_volume / yesterday_volume,
today_volume / bf_yesterday_volume])
if abs(price_base * price_factor) < yesterday_close * 0.10:
# if volume ratio * price_base does not exceed 10% of yesterday's close, compute today's price directly
today_price = yesterday_close + \
sign * price_base * price_factor
else:
# if the move would exceed 10%, cap it: the limit both up and down is 10%
today_price = yesterday_close + sign * yesterday_close * 0.10
return today_price
def change_real_to_another_word(symbol):
"""
Keep only the first two days of the original real price column; keep the volume and day-of-week columns untouched.
The remaining prices are regenerated into the other world's prices with _gen_another_word_price
:param symbol:
:return:
"""
kl_pd = ABuSymbolPd.make_kl_df(symbol)
if kl_pd is not None:
# keep only close price, day of week and volume from the original stock data
kl_pig_three = kl_pd.filter(['close', 'date_week', 'volume'])
# keep only the first two days of real closing prices, set the rest to nan
kl_pig_three['close'][2:] = np.nan
# turn the nan prices into the pig trader's world prices with _gen_another_word_price
_gen_another_word_price(kl_pig_three)
return kl_pig_three
def sample_102(show=True):
"""
10.2 Generate the mapped stock data of the pig trader's world
:return:
"""
choice_symbols = ['usNOAH', 'usSFUN', 'usBIDU', 'usAAPL', 'usGOOG',
'usTSLA', 'usWUBA', 'usVIPS']
another_word_dict = {}
real_dict = {}
for symbol in choice_symbols:
# dict of price series in the pig trader's world
another_word_dict[symbol] = change_real_to_another_word(symbol)
# dict of real-world price series; runtime efficiency is not a concern here
real_dict[symbol] = ABuSymbolPd.make_kl_df(symbol)
if show:
# as shown in Table 10-1
print('another_word_dict[usNOAH].head():\n', another_word_dict['usNOAH'].head())
print('real_dict[usNOAH].head():\n', real_dict['usNOAH'].head().filter(['close', 'date_week', 'volume']))
import itertools
# 4 * 2
_, axs = plt.subplots(nrows=4, ncols=2, figsize=(20, 15))
# flatten the grid of axes into a list
axs_list = list(itertools.chain.from_iterable(axs))
for symbol, ax in zip(choice_symbols, axs_list):
# plot the price series in the pig trader's world
another_word_dict[symbol].close.plot(ax=ax)
# the same stock's price series in the real world
real_dict[symbol].close.plot(ax=ax)
ax.set_title(symbol)
plt.show()
return another_word_dict
"""
10.3 Supervised machine learning
"""
def gen_pig_three_feature(kl_another_word):
"""
Build the pig trader's feature model.
:param kl_another_word: the dataframe generated in the previous section with _gen_another_word_price,
containing close price, day-of-week and volume columns
:return:
"""
# the y value is close.pct_change, i.e. the percentage price change
kl_another_word['regress_y'] = kl_another_word.close.pct_change()
# closing price of the day before yesterday
kl_another_word['bf_yesterday_close'] = 0
# yesterday's closing price
kl_another_word['yesterday_close'] = 0
# yesterday's volume
kl_another_word['yesterday_volume'] = 0
# volume of the day before yesterday
kl_another_word['bf_yesterday_volume'] = 0
# align the feature: the day-before-yesterday's close is shifted 2 time units from today's close, [2:] = [:-2]
kl_another_word['bf_yesterday_close'][2:] = \
kl_another_word['close'][:-2]
# align the feature: the day-before-yesterday's volume
kl_another_word['bf_yesterday_volume'][2:] = \
kl_another_word['volume'][:-2]
# align the feature: yesterday's close is shifted 1 time unit from today's close, [1:] = [:-1]
kl_another_word['yesterday_close'][1:] = \
kl_another_word['close'][:-1]
# align the feature: yesterday's volume
kl_another_word['yesterday_volume'][1:] = \
kl_another_word['volume'][:-1]
# feature 1: price difference
kl_another_word['feature_price_change'] = \
kl_another_word['yesterday_close'] - \
kl_another_word['bf_yesterday_close']
# feature 2: volume difference
kl_another_word['feature_volume_Change'] = \
kl_another_word['yesterday_volume'] - \
kl_another_word['bf_yesterday_volume']
# feature 3: up/down sign
kl_another_word['feature_sign'] = np.sign(
kl_another_word['feature_price_change'] * kl_another_word[
'feature_volume_Change'])
# feature 4: day of the week
kl_another_word['feature_date_week'] = kl_another_word[
'date_week']
"""
Build noise features: the pig trader cannot analyze every real feature factor correctly either,
so some noise features are introduced here
"""
# product of volumes
kl_another_word['feature_volume_noise'] = \
kl_another_word['yesterday_volume'] * \
kl_another_word['bf_yesterday_volume']
# product of prices
kl_another_word['feature_price_noise'] = \
kl_another_word['yesterday_close'] * \
kl_another_word['bf_yesterday_close']
# standardize the data
scaler = preprocessing.StandardScaler()
kl_another_word['feature_price_change'] = scaler.fit_transform(
kl_another_word['feature_price_change'].values.reshape(-1, 1))
kl_another_word['feature_volume_Change'] = scaler.fit_transform(
kl_another_word['feature_volume_Change'].values.reshape(-1, 1))
kl_another_word['feature_volume_noise'] = scaler.fit_transform(
kl_another_word['feature_volume_noise'].values.reshape(-1, 1))
kl_another_word['feature_price_noise'] = scaler.fit_transform(
kl_another_word['feature_price_noise'].values.reshape(-1, 1))
# keep only the columns starting with feature_ plus regress_y; drop the first two days, i.e. [2:]
kl_pig_three_feature = kl_another_word.filter(
regex='regress_y|feature_*')[2:]
return kl_pig_three_feature
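# --- Editor's addition: a hedged sketch (not part of the book's code) showing
# the zero-mean / unit-variance scaling used by gen_pig_three_feature on a toy
# column; the helper is uncalled and its name is illustrative.
def _demo_standard_scaler():
    toy = np.array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 1)
    scaled = preprocessing.StandardScaler().fit_transform(toy)
    # after scaling the column has mean ~ 0 and standard deviation ~ 1
    return scaled.mean(), scaled.std()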
def sample_103_0(show=True):
"""
10.3 Example of generating the training-set features for the pig trader
:return:
"""
another_word_dict = sample_102(show=False)
pig_three_feature = None
for symbol in another_word_dict:
# first take the corresponding price series
kl_another_word = another_word_dict[symbol]
# generate training-set features from the price series with gen_pig_three_feature
kl_feature = gen_pig_three_feature(kl_another_word)
# concatenate the features of every stock to form the training set
pig_three_feature = kl_feature if pig_three_feature is None \
else pig_three_feature.append(kl_feature)
# Dataframe -> matrix
feature_np = pig_three_feature.as_matrix()
# x feature matrix
train_x = feature_np[:, 1:]
# continuous y values for regression training
train_y_regress = feature_np[:, 0]
# discrete y values for classification training, used later by the classification techniques
# noinspection PyTypeChecker
train_y_classification = np.where(train_y_regress > 0, 1, 0)
if show:
print('pig_three_feature.shape:', pig_three_feature.shape)
print('pig_three_feature.tail():\n', pig_three_feature.tail())
print('train_x[:5], train_y_regress[:5], train_y_classification[:5]:\n', train_x[:5], train_y_regress[:5],
train_y_classification[:5])
return train_x, train_y_regress, train_y_classification, pig_three_feature
"""
The pig trader predicts stock prices with regression
"""
def sample_1031_1():
"""
10.3.1_1 The pig trader predicts stock prices with regression: generate training-set and test-set data
:return:
"""
# noinspection PyShadowingNames
def gen_feature_from_symbol(symbol):
"""
Wrap the conversion from a symbol to its feature matrix
:param symbol:
:return:
"""
# convert the real-world price series into the pig trader's world
kl_another_word = change_real_to_another_word(symbol)
# convert the price series into a feature dataframe with gen_pig_three_feature
kl_another_word_feature_test = gen_pig_three_feature(kl_another_word)
# convert to a matrix
feature_np_test = kl_another_word_feature_test.as_matrix()
# extract the regression y from the matrix
test_y_regress = feature_np_test[:, 0]
# regression y -> classification y
# noinspection PyTypeChecker
test_y_classification = np.where(test_y_regress > 0, 1, 0)
# extract the x feature matrix from the matrix
test_x = feature_np_test[:, 1:]
return test_x, test_y_regress, test_y_classification, kl_another_word_feature_test
# generate the training-set data
train_x, train_y_regress, train_y_classification, pig_three_feature = sample_103_0(show=False)
# generate the test-set data
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = gen_feature_from_symbol('usFB')
print('training set: {}, test set: {}'.format(pig_three_feature.shape[0], kl_another_word_feature_test.shape[0]))
return train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test
def regress_process(estimator, train_x, train_y_regress, test_x,
test_y_regress):
# fit on the training-set data
estimator.fit(train_x, train_y_regress)
# predict y for the test set with the trained model, i.e. predict usFB's price change from its features
test_y_prdict_regress = estimator.predict(test_x)
# plot usFB's actual price change
plt.plot(test_y_regress.cumsum())
# plot the usFB price change predicted by the model
plt.plot(test_y_prdict_regress.cumsum())
# cross-validate on the training-set data
from abupy import cross_val_score
from abupy.CoreBu.ABuFixes import mean_squared_error_scorer
scores = cross_val_score(estimator, train_x,
train_y_regress, cv=10,
scoring=mean_squared_error_scorer)
# take the square root of the mse -> rmse
mean_sc = -np.mean(np.sqrt(-scores))
print('{} RMSE: {}'.format(estimator.__class__.__name__, mean_sc))
def sample_1031_2():
"""
10.3.1_2 The pig trader predicts stock prices with regression: LinearRegression
:return:
"""
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
# instantiate the linear regression estimator
from sklearn.linear_model import LinearRegression
estimator = LinearRegression()
# pass in the regression estimator, training x, training continuous y, test x and test continuous y
regress_process(estimator, train_x, train_y_regress, test_x,
test_y_regress)
plt.show()
from abupy import ABuMLExecute
ABuMLExecute.plot_learning_curve(estimator, train_x, train_y_regress, cv=10)
def sample_1031_3():
"""
10.3.1_3 The pig trader predicts stock prices with regression: PolynomialFeatures
:return:
"""
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
# pipeline wrapping PolynomialFeatures(degree=3) + LinearRegression
estimator = make_pipeline(PolynomialFeatures(degree=3),
LinearRegression())
# still use regress_process; only the estimator has changed
regress_process(estimator, train_x, train_y_regress, test_x,
test_y_regress)
plt.show()
def sample_1031_4():
"""
10.3.1_4 The pig trader predicts stock prices with regression: ensemble learners AdaBoost and RandomForest
:return:
"""
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
# AdaBoost
from sklearn.ensemble import AdaBoostRegressor
estimator = AdaBoostRegressor(n_estimators=100)
regress_process(estimator, train_x, train_y_regress, test_x,
test_y_regress)
plt.show()
# RandomForest
from sklearn.ensemble import RandomForestRegressor
estimator = RandomForestRegressor(n_estimators=100)
regress_process(estimator, train_x, train_y_regress, test_x, test_y_regress)
plt.show()
"""
10.3.2 The pig trader predicts up/down moves with classification
"""
def classification_process(estimator, train_x, train_y_classification,
test_x, test_y_classification):
from sklearn import metrics
# fit the classifier; classification uses y_classification here
estimator.fit(train_x, train_y_classification)
# predict the test set's y with the trained classifier, i.e. predict usFB's up/down moves from its features
test_y_prdict_classification = estimator.predict(test_x)
# measure the accuracy of the up/down prediction with metrics.accuracy_score
print("{} accuracy = {:.2f}".format(
estimator.__class__.__name__,
metrics.accuracy_score(test_y_classification,
test_y_prdict_classification)))
from abupy import cross_val_score
# cross-validate on the training set with scoring='accuracy', cv=10
scores = cross_val_score(estimator, train_x,
train_y_classification,
cv=10,
scoring='accuracy')
# average all cross-validation scores
mean_sc = np.mean(scores)
print('cross validation accuracy mean: {:.2f}'.format(mean_sc))
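# --- Editor's addition: a hedged sketch (not part of the book's code). The
# function above goes through abupy's cross_val_score wrapper; the uncalled
# helper below shows the equivalent call with scikit-learn directly, assuming
# scikit-learn >= 0.18 where sklearn.model_selection is available.
def _sklearn_cv_accuracy(estimator, train_x, train_y_classification):
    from sklearn.model_selection import cross_val_score as sk_cross_val_score
    scores = sk_cross_val_score(estimator, train_x, train_y_classification,
                                cv=10, scoring='accuracy')
    return np.mean(scores)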
def sample_1032_1():
"""
10.3.2_1 The pig trader predicts up/down moves with classification: LogisticRegression
:return:
"""
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
# without noise the classification is 100% correct
from sklearn.linear_model import LogisticRegression
estimator = LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
# pass the classifier, training x, training classification y, test x and test y into the function
classification_process(estimator, train_x, train_y_classification,
test_x, test_y_classification)
# enable the noise and run again; with noise the accuracy is about 93%; g_with_date_week_noise stays enabled from here on
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
classification_process(estimator, train_x, train_y_classification,
test_x, test_y_classification)
def sample_1032_2():
"""
10.3.2_2 The pig trader predicts up/down moves with classification: svm
:return:
"""
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
from sklearn.svm import SVC
estimator = SVC(kernel='rbf')
classification_process(estimator, train_x, train_y_classification,
test_x, test_y_classification)
def sample_1032_3():
"""
10.3.2_3 The pig trader predicts up/down moves with classification: RandomForestClassifier
:return:
"""
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators=100)
classification_process(estimator, train_x, train_y_classification,
test_x, test_y_classification)
def sample_1032_4(show=True):
"""
10.3.2_4 The pig trader predicts up/down moves with classification: train_test_split
:return:
"""
from sklearn import metrics
from abupy import train_test_split
# noinspection PyShadowingNames
def train_test_split_xy(estimator, x, y, test_size=0.5,
random_state=0):
# use train_test_split to randomly split the original training set into a new training set and a test set
train_x, test_x, train_y, test_y = \
train_test_split(x, y, test_size=test_size,
random_state=random_state)
if show:
print(x.shape, y.shape)
print(train_x.shape, train_y.shape)
print(test_x.shape, test_y.shape)
clf = estimator.fit(train_x, train_y)
predictions = clf.predict(test_x)
if show:
# measure accuracy
print("accuracy = %.2f" %
(metrics.accuracy_score(test_y, predictions)))
# measure precision
print("precision_score = %.2f" %
(metrics.precision_score(test_y, predictions)))
# measure recall
print("recall_score = %.2f" %
(metrics.recall_score(test_y, predictions)))
return test_y, predictions
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators=100)
test_y, predictions = train_test_split_xy(estimator, train_x, train_y_classification)
return estimator, train_x, train_y_classification, test_y, predictions
def sample_1032_5():
"""
10.3.2_5 The pig trader predicts up/down moves with classification: confusion matrix and roc curve
:return:
"""
from sklearn import metrics
# noinspection PyShadowingNames
def confusion_matrix_with_report(test_y, predictions):
confusion_matrix = metrics.confusion_matrix(test_y, predictions)
# print("Confusion Matrix ", confusion_matrix)
print(" Predicted")
print(" | 0 | 1 |")
print(" |-----|-----|")
print(" 0 | %3d | %3d |" % (confusion_matrix[0, 0],
confusion_matrix[0, 1]))
print("Actual |-----|-----|")
print(" 1 | %3d | %3d |" % (confusion_matrix[1, 0],
confusion_matrix[1, 1]))
print(" |-----|-----|")
print(metrics.classification_report(test_y, predictions))
estimator, train_x, train_y_classification, test_y, predictions = sample_1032_4(show=False)
confusion_matrix_with_report(test_y, predictions)
from abupy import ABuMLExecute
ABuMLExecute.plot_roc_estimator(estimator, train_x, train_y_classification)
def sample_1033_1():
"""
10.3.3 Classify with a decision tree and draw the decision graph
dot (graphviz) must be installed so that os.system("dot -T png graphviz.dot -o graphviz.png") can generate the png
:return:
"""
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import os
estimator = DecisionTreeClassifier(max_depth=2, random_state=1)
# noinspection PyShadowingNames
def graphviz_tree(estimator, features, x, y):
if not hasattr(estimator, 'tree_'):
print('only tree can graphviz!')
return
estimator.fit(x, y)
# export the decision model to graphviz.dot
tree.export_graphviz(estimator.tree_, out_file='graphviz.dot',
feature_names=features)
# render the decision graph with dot and save it as a png
os.system("dot -T png graphviz.dot -o graphviz.png")
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
# the feature name columns pig_three_feature.columns[1:] are used here
graphviz_tree(estimator, pig_three_feature.columns[1:], train_x,
train_y_classification)
import PIL.Image
PIL.Image.open('graphviz.png').show()
def sample_1033_2():
"""
10.3.3 Feature importance ranking and support rating
:return:
"""
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
# noinspection PyShadowingNames
def importances_coef_pd(estimator):
"""
Feature importance
"""
if hasattr(estimator, 'feature_importances_'):
# estimators with feature_importances_ are sorted with sort_values
return pd.DataFrame(
{'feature': list(pig_three_feature.columns[1:]),
'importance': estimator.feature_importances_}).sort_values('importance')
elif hasattr(estimator, 'coef_'):
# estimators with coef_ are sorted by coef
return pd.DataFrame(
{"columns": list(pig_three_feature.columns)[1:], "coef": list(estimator.coef_.T)}).sort_values('coef')
else:
print('estimator not hasattr feature_importances_ or coef_!')
# use a random forest classifier
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators=100)
# fit the model on the data
estimator.fit(train_x, train_y_classification)
# judge the importance of the fitted model's features, from least to most important, as shown in Table 10-4
print('importances_coef_pd(estimator):\n', importances_coef_pd(estimator))
from sklearn.feature_selection import RFE
# noinspection PyShadowingNames
def feature_selection(estimator, x, y):
"""
Support rating
"""
selector = RFE(estimator)
selector.fit(x, y)
print('RFE selection')
print(pd.DataFrame(
{'support': selector.support_, 'ranking': selector.ranking_},
index=pig_three_feature.columns[1:]))
print('feature_selection(estimator, train_x, train_y_classification):\n',
feature_selection(estimator, train_x, train_y_classification))
"""
10.4 Unsupervised machine learning
"""
def sample_1041():
"""
10.4.1 Visualize the data with dimensionality reduction
:return:
"""
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
from sklearn.decomposition import PCA
from abupy import ABuMLExecute
# noinspection PyShadowingNames
def plot_decision_function(estimator, x, y):
# reduce dimensionality with pca, keeping only 2 feature components
pca_2n = PCA(n_components=2)
x = pca_2n.fit_transform(x)
# fit the estimator
estimator.fit(x, y)
plt.scatter(x[:, 0], x[:, 1], c=y, s=50, cmap='spring')
ABuMLExecute.plot_decision_boundary(
lambda p_x: estimator.predict(p_x), x, y)
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators=100)
plot_decision_function(estimator, train_x, train_y_classification)
# noinspection PyTypeChecker
def sample_1042():
"""
10.4.2 The pig trader improves accuracy with a clustering algorithm
:return:
"""
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
# use a random forest as the classifier
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_x, train_y_classification)
test_y_prdict_classification = estimator.predict(test_x)
from sklearn import metrics
print("accuracy = %.2f" % (
metrics.accuracy_score(test_y_classification,
test_y_prdict_classification)))
# the test-set features, i.e. usFB's kl features
pig_three_kmean_feature = kl_another_word_feature_test
# the test set's actual up/down results, test_y_classification
pig_three_kmean_feature['y'] = test_y_classification
# the up/down results predicted by the random forest classifier above, test_y_prdict_classification
pig_three_kmean_feature['y_prdict'] = test_y_prdict_classification
# add a new column recording whether the prediction was correct
pig_three_kmean_feature['y_same'] = np.where(
pig_three_kmean_feature['y'] ==
pig_three_kmean_feature['y_prdict'], 1, 0)
# keep only the y_same column just created
pig_three_kmean_feature = pig_three_kmean_feature.filter(['y_same'])
from sklearn.cluster import KMeans
# use the y_same-only data as x_kmean
x_kmean = pig_three_kmean_feature.values
# n_clusters=2, i.e. cluster into just two groups
kmean = KMeans(n_clusters=2)
kmean.fit(x_kmean)
# store the cluster labels in a new column named cluster
pig_three_kmean_feature['cluster'] = kmean.predict(x_kmean)
# merge in the day-of-week feature
pig_three_kmean_feature['feature_date_week'] = \
kl_another_word_feature_test['feature_date_week']
# as shown in Table 10-5
print('pig_three_kmean_feature.tail():\n', pig_three_kmean_feature.tail())
# as shown in Table 10-6
print('pd.crosstab(pig_three_kmean_feature.feature_date_week, pig_three_kmean_feature.cluster):\n',
pd.crosstab(pig_three_kmean_feature.feature_date_week, pig_three_kmean_feature.cluster))
"""
10.5 Waking from the dream
"""
def sample_105_0():
"""
10.5 AbuML
:return:
"""
global g_with_date_week_noise
g_with_date_week_noise = True
train_x, train_y_regress, train_y_classification, pig_three_feature, \
test_x, test_y_regress, test_y_classification, kl_another_word_feature_test = sample_1031_1()
from abupy import AbuML
# build AbuML from the x and y matrices plus the feature DataFrame
ml = AbuML(train_x, train_y_classification, pig_three_feature)
# use a random forest as the classifier
_ = ml.estimator.random_forest_classifier()
# accuracy from cross-validation
print('ml.cross_val_accuracy_score():\n', ml.cross_val_accuracy_score())
# feature selection
print('ml.feature_selection():\n', ml.feature_selection())
"""
The content below cannot use the sandbox environment; recommended companion reading:
abu quant docs - Section 19: data sources
Section 20: UMP decisions for US stock trading
"""
def sample_1051_0():
"""
10.5.1 Generate features during the backtest, split train/test sets, snapshot filled buy orders: data preparation
If you have not yet updated the data with the Tencent data source from abu quant docs Section 19, run this;
if you have already run it, do not run it again:
"""
from abupy import EMarketTargetType, EMarketSourceType, EDataCacheType
# disable the sandbox data environment
abupy.env.disable_example_env_ipython()
abupy.env.g_market_source = EMarketSourceType.E_MARKET_SOURCE_tx
abupy.env.g_data_cache_type = EDataCacheType.E_DATA_CACHE_CSV
# first, pre-download 6 years of data for every stock in the market (a 5-year backtest needs 6 years of data downloaded in advance)
abu.run_kl_update(start='2011-08-08', end='2017-08-08', market=EMarketTargetType.E_MARKET_TARGET_US)
def sample_1051_1(from_cache=False, show=True):
"""
10.5.1 Generate features during the backtest, split train/test sets, snapshot filled buy orders: data preparation
:return:
"""
from abupy import AbuMetricsBase
from abupy import AbuFactorBuyBreak
from abupy import AbuFactorAtrNStop
from abupy import AbuFactorPreAtrNStop
from abupy import AbuFactorCloseAtrNStop
# disable the sandbox data environment
abupy.env.disable_example_env_ipython()
from abupy import EMarketDataFetchMode
# sample_94_1 already pre-downloaded the data, so use the cache and set E_DATA_FETCH_FORCE_LOCAL;
# run_kl_update actually sets FORCE_LOCAL at the end anyway
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
# set the stock-picking factors; None means no stock picker is used
stock_pickers = None
# the buy factors still use the upward-breakout factor
buy_factors = [{'xd': 60, 'class': AbuFactorBuyBreak},
{'xd': 42, 'class': AbuFactorBuyBreak}]
# the sell factors are the same ones used in the previous chapter
sell_factors = [
{'stop_loss_n': 1.0, 'stop_win_n': 3.0,
'class': AbuFactorAtrNStop},
{'class': AbuFactorPreAtrNStop, 'pre_atr_n': 1.5},
{'class': AbuFactorCloseAtrNStop, 'close_atr_n': 1.5}
]
# generate buy-moment features during the backtest
abupy.env.g_enable_ml_feature = True
# split the symbols into training-set data and test-set data during the backtest
abupy.env.g_enable_train_test_split = True
# the split ratio below defaults to 10, i.e. split into 10 folds, 9 for training and 1 for testing;
# since the US market has many stocks, split into 4 folds instead: 3 for training, 1 for testing
abupy.env.g_split_tt_n_folds = 4
from abupy import EStoreAbu
if from_cache:
abu_result_tuple = \
abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
custom_name='train_us')
else:
# initial capital of 5,000,000; position management still uses the default atr
read_cash = 5000000
# set the base buy-in capital per trade to 15/10000 (0.15%) of the account
abupy.beta.atr.g_atr_pos_base = 0.0015
# run the strategy with run_loop_back, using the same factors as before;
# choice_symbols=None backtests the whole market over 5 years of history
abu_result_tuple, _ = abu.run_loop_back(read_cash,
buy_factors, sell_factors,
stock_pickers,
choice_symbols=None,
start='2012-08-08', end='2017-08-08')
# save the results locally for later backtest analysis; the code to store the backtest results is shown below
abu.store_abu_result_tuple(abu_result_tuple, n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
custom_name='train_us')
if show:
metrics = AbuMetricsBase(*abu_result_tuple)
metrics.fit_metrics()
metrics.plot_returns_cmp(only_show_returns=True)
"*****************************************************************"
abupy.env.g_enable_train_test_split = False
# use the test data from the split
abupy.env.g_enable_last_split_test = True
from abupy import EStoreAbu
if from_cache:
abu_result_tuple_test = \
abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
custom_name='test_us')
else:
read_cash = 5000000
abupy.beta.atr.g_atr_pos_base = 0.007
choice_symbols = None
abu_result_tuple_test, kl_pd_manager_test = abu.run_loop_back(read_cash,
buy_factors, sell_factors, stock_pickers,
choice_symbols=choice_symbols, start='2012-08-08',
end='2017-08-08')
abu.store_abu_result_tuple(abu_result_tuple_test, n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
custom_name='test_us')
if show:
metrics = AbuMetricsBase(*abu_result_tuple_test)
metrics.fit_metrics()
metrics.plot_returns_cmp(only_show_returns=True)
print(abu_result_tuple.orders_pd[abu_result_tuple.orders_pd.result != 0].head())
return abu_result_tuple, abu_result_tuple_test
# noinspection PyUnresolvedReferences
def sample_1052():
"""
10.5.2 Feature-based trade prediction
:return:
"""
# must be run with the cached results available
abu_result_tuple, _ = sample_1051_1(from_cache=True, show=False)
from abupy.UmpBu.ABuUmpMainMul import UmpMulFiter
mul = UmpMulFiter(orders_pd=abu_result_tuple.orders_pd, scaler=False)
print('mul.df.head():\n', mul.df.head())
# svm is the default classifier
print('decision_tree_classifier cv please wait...')
mul.estimator.decision_tree_classifier()
mul.cross_val_accuracy_score()
# svm is the default classifier
print('knn_classifier cv please wait...')
# svm is the default classifier; switch the classifier to knn
mul.estimator.knn_classifier()
mul.cross_val_accuracy_score()
from abupy.UmpBu.ABuUmpMainBase import UmpDegFiter
deg = UmpDegFiter(orders_pd=abu_result_tuple.orders_pd)
print('deg.df.head():\n', deg.df.head())
print('xgb_classifier cv please wait...')
# use GradientBoosting as the classifier
deg.estimator.xgb_classifier()
deg.cross_val_accuracy_score()
print('adaboost_classifier cv please wait...')
# use adaboost as the classifier
deg.estimator.adaboost_classifier(base_estimator=None)
deg.cross_val_accuracy_score()
print('train_test_split_xy please wait...')
deg.train_test_split_xy()
if __name__ == "__main__":
sample_102()
# sample_103_0()
# sample_1031_1()
# sample_1031_2()
# sample_1031_3()
# sample_1031_4()
# sample_1032_1()
# sample_1032_2()
# sample_1032_3()
# sample_1032_4()
# sample_1032_5()
# sample_1033_1()
# sample_1033_2()
# sample_1041()
# sample_1042()
# sample_105_0()
# sample_1051_0()
# sample_1051_1(from_cache=True)
# sample_1051_1(from_cache=False)
# sample_1052()
|
gpl-3.0
|
demianw/dipy
|
dipy/viz/regtools.py
|
6
|
16263
|
import numpy as np
from ..utils.optpkg import optional_package
matplotlib, has_mpl, setup_module = optional_package("matplotlib")
plt, _, _ = optional_package("matplotlib.pyplot")
def _tile_plot(imgs, titles, **kwargs):
"""
Helper function
"""
# Create a new figure and plot the three images
fig, ax = plt.subplots(1, len(imgs))
for ii, a in enumerate(ax):
a.set_axis_off()
a.imshow(imgs[ii], **kwargs)
a.set_title(titles[ii])
return fig
def overlay_images(img0, img1, title0='', title_mid='', title1='', fname=None):
r""" Plot two images one on top of the other using red and green channels.
Creates a figure containing three images: the first image to the left
plotted on the red channel of a color image, the second to the right
plotted on the green channel of a color image and the two given images on
top of each other using the red channel for the first image and the green
channel for the second one. It is assumed that both images have the same
shape. The intended use of this function is to visually assess the quality
of a registration result.
Parameters
----------
img0 : array, shape(R, C)
the image to be plotted on the red channel, to the left of the figure
img1 : array, shape(R, C)
the image to be plotted on the green channel, to the right of the
figure
title0 : string (optional)
the title to be written on top of the image to the left. By default, no
title is displayed.
title_mid : string (optional)
the title to be written on top of the middle image. By default, no
title is displayed.
title1 : string (optional)
the title to be written on top of the image to the right. By default,
no title is displayed.
fname : string (optional)
the file name to write the resulting figure. If None (default), the
image is not saved.
"""
# Normalize the input images to [0,255]
img0 = 255*((img0 - img0.min()) / (img0.max() - img0.min()))
img1 = 255*((img1 - img1.min()) / (img1.max() - img1.min()))
# Create the color images
img0_red = np.zeros(shape=(img0.shape) + (3,), dtype=np.uint8)
img1_green = np.zeros(shape=(img0.shape) + (3,), dtype=np.uint8)
overlay = np.zeros(shape=(img0.shape) + (3,), dtype=np.uint8)
# Copy the normalized intensities into the appropriate channels of the
# color images
img0_red[..., 0] = img0
img1_green[..., 1] = img1
overlay[..., 0] = img0
overlay[..., 1] = img1
fig = _tile_plot([img0_red, overlay, img1_green],
[title0, title_mid, title1])
# If a file name was given, save the figure
if fname is not None:
fig.savefig(fname, bbox_inches='tight')
return fig
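# --- Editor's addition: an uncalled, illustrative example of overlay_images on
# synthetic data (not part of dipy); the Gaussian blob parameters are arbitrary.
def _example_overlay_images():
    grid = np.mgrid[0:64, 0:64]
    static = np.exp(-((grid[0] - 32) ** 2 + (grid[1] - 32) ** 2) / 200.0)
    moving = np.exp(-((grid[0] - 28) ** 2 + (grid[1] - 36) ** 2) / 200.0)
    # A misregistered pair shows up as separated red and green blobs in the
    # middle panel.
    return overlay_images(static, moving, 'static', 'overlay', 'moving')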
def draw_lattice_2d(nrows, ncols, delta):
r"""Create a regular lattice of nrows x ncols squares.
Creates an image (2D array) of a regular lattice of nrows x ncols squares.
The size of each square is delta x delta pixels (not counting the
separation lines). The lines are one pixel width.
Parameters
----------
nrows : int
the number of squares to be drawn vertically
ncols : int
the number of squares to be drawn horizontally
delta : int
the size of each square of the grid. Each square is delta x delta
pixels
Returns
-------
lattice : array, shape (R, C)
the image (2D array) of the regular lattice. The shape (R, C) of the
array is given by
R = 1 + (delta + 1) * nrows
C = 1 + (delta + 1) * ncols
"""
lattice = np.ndarray((1 + (delta + 1) * nrows,
1 + (delta + 1) * ncols),
dtype=np.float64)
# Fill the lattice with "white"
lattice[...] = 127
# Draw the horizontal lines in "black"
for i in range(nrows + 1):
lattice[i*(delta + 1), :] = 0
# Draw the vertical lines in "black"
for j in range(ncols + 1):
lattice[:, j * (delta + 1)] = 0
return lattice
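# --- Editor's addition: an uncalled check (not part of dipy) that makes the
# lattice shape formula from the docstring above concrete.
def _check_lattice_shape(nrows=3, ncols=4, delta=10):
    lattice = draw_lattice_2d(nrows, ncols, delta)
    # R = 1 + (delta + 1) * nrows, C = 1 + (delta + 1) * ncols
    expected = (1 + (delta + 1) * nrows, 1 + (delta + 1) * ncols)
    assert lattice.shape == expected
    return lattice.shape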
def plot_2d_diffeomorphic_map(mapping, delta=10, fname=None,
direct_grid_shape=None, direct_grid2world=-1,
inverse_grid_shape=None, inverse_grid2world=-1,
show_figure=True):
r"""Draw the effect of warping a regular lattice by a diffeomorphic map.
Draws a diffeomorphic map by showing the effect of the deformation on a
regular grid. The resulting figure contains two images: the direct
transformation is plotted to the left, and the inverse transformation is
plotted to the right.
Parameters
----------
mapping : DiffeomorphicMap object
the diffeomorphic map to be drawn
delta : int, optional
the size (in pixels) of the squares of the regular lattice to be used
to plot the warping effects. Each square will be delta x delta pixels.
By default, the size will be 10 pixels.
fname : string, optional
the name of the file the figure will be written to. If None (default),
the figure will not be saved to disk.
direct_grid_shape : tuple, shape (2,), optional
the shape of the grid image after being deformed by the direct
transformation. By default, the shape of the deformed grid is the
same as the grid of the displacement field, which is by default
equal to the shape of the fixed image. In other words, the resulting
deformed grid (deformed by the direct transformation) will normally
have the same shape as the fixed image.
direct_grid2world : array, shape (3, 3), optional
the affine transformation mapping the direct grid's coordinates to
physical space. By default, this transformation will correspond to
the image-to-world transformation corresponding to the default
direct_grid_shape (in general, if users specify a direct_grid_shape,
they should also specify direct_grid2world).
inverse_grid_shape : tuple, shape (2,), optional
the shape of the grid image after being deformed by the inverse
transformation. By default, the shape of the deformed grid under the
inverse transform is the same as the image used as "moving" when
the diffeomorphic map was generated by a registration algorithm
(so it corresponds to the effect of warping the static image towards
the moving).
inverse_grid2world : array, shape (3, 3), optional
the affine transformation mapping inverse grid's coordinates to
physical space. By default, this transformation will correspond to
the image-to-world transformation corresponding to the default
inverse_grid_shape (in general, if users specify an inverse_grid_shape,
they should also specify inverse_grid2world).
show_figure : bool, optional
if True (default), the deformed grids will be plotted using matplotlib,
else the grids are just returned
Returns
-------
warped_forward : array
Image with grid showing the effect of transforming the moving image to
the static image. Shape will be `direct_grid_shape` if specified,
otherwise the shape of the static image.
warped_backward : array
Image with grid showing the effect of transforming the static image to
the moving image. Shape will be `inverse_grid_shape` if specified,
otherwise the shape of the moving image.
Note
----
The default value for the affine transformation is "-1" to handle the case
in which the user provides "None" as input meaning "identity". If we used
None as default, we wouldn't know if the user specifically wants to use
the identity (specifically passing None) or if it was left unspecified,
meaning to use the appropriate default matrix.
"""
if mapping.is_inverse:
# By default, direct_grid_shape is the codomain grid
if direct_grid_shape is None:
direct_grid_shape = mapping.codomain_shape
if direct_grid2world is -1:
direct_grid2world = mapping.codomain_grid2world
# By default, the inverse grid is the domain grid
if inverse_grid_shape is None:
inverse_grid_shape = mapping.domain_shape
if inverse_grid2world is -1:
inverse_grid2world = mapping.domain_grid2world
else:
# Now by default, direct_grid_shape is the mapping's input grid
if direct_grid_shape is None:
direct_grid_shape = mapping.domain_shape
if direct_grid2world is -1:
direct_grid2world = mapping.domain_grid2world
# By default, the output grid is the mapping's domain grid
if inverse_grid_shape is None:
inverse_grid_shape = mapping.codomain_shape
if inverse_grid2world is -1:
inverse_grid2world = mapping.codomain_grid2world
# The world-to-image (image = drawn lattice on the output grid)
# transformation is the inverse of the output affine
world_to_image = None
if inverse_grid2world is not None:
world_to_image = np.linalg.inv(inverse_grid2world)
# Draw the squares on the output grid
lattice_out = draw_lattice_2d(
(inverse_grid_shape[0] + delta) // (delta + 1),
(inverse_grid_shape[1] + delta) // (delta + 1),
delta)
lattice_out = lattice_out[0:inverse_grid_shape[0], 0:inverse_grid_shape[1]]
# Warp in the forward direction (sampling it on the input grid)
warped_forward = mapping.transform(lattice_out, 'linear', world_to_image,
direct_grid_shape, direct_grid2world)
# Now, the world-to-image (image = drawn lattice on the input grid)
# transformation is the inverse of the input affine
world_to_image = None
if direct_grid2world is not None:
world_to_image = np.linalg.inv(direct_grid2world)
# Draw the squares on the input grid
lattice_in = draw_lattice_2d((direct_grid_shape[0] + delta) // (delta + 1),
(direct_grid_shape[1] + delta) // (delta + 1),
delta)
lattice_in = lattice_in[0:direct_grid_shape[0], 0:direct_grid_shape[1]]
# Warp in the backward direction (sampling it on the output grid)
warped_backward = mapping.transform_inverse(
lattice_in, 'linear', world_to_image, inverse_grid_shape,
inverse_grid2world)
# Now plot the grids
if show_figure:
plt.figure()
plt.subplot(1, 2, 1).set_axis_off()
plt.imshow(warped_forward, cmap=plt.cm.gray)
plt.title('Direct transform')
plt.subplot(1, 2, 2).set_axis_off()
plt.imshow(warped_backward, cmap=plt.cm.gray)
plt.title('Inverse transform')
# Finally, save the figure to disk
if fname is not None:
plt.savefig(fname, bbox_inches='tight')
# Return the deformed grids
return warped_forward, warped_backward
def plot_slices(V, slice_indices=None, fname=None):
r"""Plot 3 slices from the given volume: 1 sagittal, 1 coronal and 1 axial
Creates a figure showing the axial, coronal and sagittal slices at the
requested positions of the given volume. The requested slices are specified
by slice_indices.
Parameters
----------
V : array, shape (S, R, C)
the 3D volume to extract the slices from
slice_indices : array, shape (3,) (optional)
the indices of the sagittal (slice_indices[0]), coronal (slice_indices[1])
and axial (slice_indices[2]) slices to be displayed. If None, the
middle slices along each direction are displayed.
fname : string (optional)
the name of the file to save the figure to. If None (default), the
figure is not saved to disk.
"""
if slice_indices is None:
slice_indices = np.array(V.shape)//2
# Normalize the intensities to [0, 255]
sh = V.shape
V = np.asarray(V, dtype=np.float64)
V = 255 * (V - V.min()) / (V.max() - V.min())
# Extract the middle slices
axial = np.asarray(V[:, :, slice_indices[2]]).astype(np.uint8).T
coronal = np.asarray(V[:, slice_indices[1], :]).astype(np.uint8).T
sagittal = np.asarray(V[slice_indices[0], :, :]).astype(np.uint8).T
fig = _tile_plot([axial, coronal, sagittal],
['Axial', 'Coronal', 'Sagittal'],
cmap=plt.cm.gray, origin='lower')
# Save the figure if requested
if fname is not None:
fig.savefig(fname, bbox_inches='tight')
return fig
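# --- Editor's addition: an uncalled, illustrative example of plot_slices on a
# random synthetic volume (not part of dipy); the volume size is arbitrary.
def _example_plot_slices():
    rng = np.random.RandomState(0)
    V = rng.rand(32, 40, 48)
    # With slice_indices=None the middle slice along each axis is shown.
    return plot_slices(V)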
def overlay_slices(L, R, slice_index=None, slice_type=1, ltitle='Left',
rtitle='Right', fname=None):
r"""Plot three overlaid slices from the given volumes.
Creates a figure containing three images: the gray scale k-th slice of
the first volume (L) to the left, where k=slice_index, the k-th slice of
the second volume (R) to the right and the k-th slices of the two given
images on top of each other using the red channel for the first volume and
the green channel for the second one. It is assumed that both volumes have
the same shape. The intended use of this function is to visually assess the
quality of a registration result.
Parameters
----------
L : array, shape (S, R, C)
the first volume to extract the slice from, plotted to the left
R : array, shape (S, R, C)
the second volume to extract the slice from, plotted to the right
slice_index : int (optional)
the index of the slices (along the axis given by slice_type) to be
        overlaid. If None, the middle slice along the specified axis is used
slice_type : int (optional)
the type of slice to be extracted:
0=sagital, 1=coronal (default), 2=axial.
    ltitle : string (optional)
        the string to be written as the title of the left image. Defaults
        to 'Left'.
    rtitle : string (optional)
        the string to be written as the title of the right image. Defaults
        to 'Right'.
fname : string (optional)
the name of the file to write the image to. If None (default), the
figure is not saved to disk.
"""
# Normalize the intensities to [0,255]
sh = L.shape
L = np.asarray(L, dtype=np.float64)
R = np.asarray(R, dtype=np.float64)
L = 255 * (L - L.min()) / (L.max() - L.min())
R = 255 * (R - R.min()) / (R.max() - R.min())
# Create the color image to draw the overlapped slices into, and extract
# the slices (note the transpositions)
    if slice_type == 0:
if slice_index is None:
slice_index = sh[0]//2
colorImage = np.zeros(shape=(sh[2], sh[1], 3), dtype=np.uint8)
ll = np.asarray(L[slice_index, :, :]).astype(np.uint8).T
rr = np.asarray(R[slice_index, :, :]).astype(np.uint8).T
    elif slice_type == 1:
if slice_index is None:
slice_index = sh[1]//2
colorImage = np.zeros(shape=(sh[2], sh[0], 3), dtype=np.uint8)
ll = np.asarray(L[:, slice_index, :]).astype(np.uint8).T
rr = np.asarray(R[:, slice_index, :]).astype(np.uint8).T
    elif slice_type == 2:
if slice_index is None:
slice_index = sh[2]//2
colorImage = np.zeros(shape=(sh[1], sh[0], 3), dtype=np.uint8)
ll = np.asarray(L[:, :, slice_index]).astype(np.uint8).T
rr = np.asarray(R[:, :, slice_index]).astype(np.uint8).T
else:
print("Slice type must be 0, 1 or 2.")
return
# Draw the intensity images to the appropriate channels of the color image
# The "(ll > ll[0, 0])" condition is just an attempt to eliminate the
# background when its intensity is not exactly zero (the [0,0] corner is
# usually background)
colorImage[..., 0] = ll * (ll > ll[0, 0])
colorImage[..., 1] = rr * (rr > rr[0, 0])
fig = _tile_plot([ll, colorImage, rr],
[ltitle, 'Overlay', rtitle],
cmap=plt.cm.gray, origin='lower')
# Save the figure to disk, if requested
if fname is not None:
fig.savefig(fname, bbox_inches='tight')
return fig
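# A minimal usage sketch (hedged): two synthetic volumes of equal shape stand in for a
# static and a moving image; wrapped in a function so nothing runs at import time.
def _example_overlay_slices():
    static = np.random.rand(40, 50, 60)
    moving = np.random.rand(40, 50, 60)
    # Coronal overlay (slice_type=1) at the middle slice
    return overlay_slices(static, moving, slice_type=1,
                          ltitle='Static', rtitle='Moving')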
|
bsd-3-clause
|
JasonKessler/scattertext
|
scattertext/ScatterChart.py
|
1
|
28832
|
import logging
import sys
import numpy as np
import pandas as pd
from scipy.stats import rankdata
from scattertext.PValGetter import get_p_vals
from scattertext.Scalers import percentile_min, percentile_alphabetical
from scattertext.ScatterChartData import ScatterChartData
from scattertext.TermDocMatrixFilter import filter_bigrams_by_pmis, \
filter_out_unigrams_that_only_occur_in_one_bigram
from scattertext.termscoring import ScaledFScore
from scattertext.termscoring.RankDifference import RankDifference
from scattertext.termscoring.CornerScore import CornerScore
class NoWordMeetsTermFrequencyRequirementsError(Exception):
pass
class CoordinatesNotRightException(Exception): pass
class TermDocMatrixHasNoMetadataException(Exception): pass
def check_topic_model_string_format(term_dict):
'''
Parameters
----------
term_dict: dict {metadataname: [term1, term2, ....], ...}
Returns
-------
None
'''
if type(term_dict) != dict:
raise TypeError("Argument for term_dict must be a dict, keyed on strings, and contain a list of strings.")
for k, v in term_dict.items():
if type(v) != list:
raise TypeError("Values in term dict must only be lists.")
if sys.version_info[0] == 2:
if type(k) != str and type(k) != unicode:
raise TypeError("Keys in term dict must be of type str or unicode.")
for e in v:
                if type(e) != str and type(e) != unicode:
raise TypeError("Values in term lists must be str or unicode.")
if sys.version_info[0] == 3:
if type(k) != str:
raise TypeError("Keys in term dict must be of type str.")
for e in v:
if type(e) != str:
raise TypeError("Values in term lists must be str.")
class NeedToInjectCoordinatesException(Exception):
pass
class ScatterChart:
def __init__(self,
term_doc_matrix,
verbose=False,
**kwargs):
'''
Parameters
----------
term_doc_matrix: term document matrix to create chart from
Remaining parameters are from ScatterChartData
'''
self.term_doc_matrix = term_doc_matrix
self.scatterchartdata = ScatterChartData(**kwargs)
self.x_coords = None
self.y_coords = None
self.original_x = None
self.original_y = None
self._rescale_x = None
self._rescale_y = None
self.used = False
self.metadata_term_lists = None
self.metadata_descriptions = None
self.term_colors = None
self.hidden_terms = None
self.verbose = verbose
def inject_metadata_term_lists(self, term_dict):
'''
Inserts dictionary of meta data terms into object.
Parameters
----------
term_dict: dict {metadataname: [term1, term2, ....], ...}
Returns
-------
self: ScatterChart
'''
check_topic_model_string_format(term_dict)
if not self.term_doc_matrix.metadata_in_use():
raise TermDocMatrixHasNoMetadataException("No metadata is present in the term document matrix")
self.metadata_term_lists = term_dict
return self
def inject_metadata_descriptions(self, term_dict):
'''
Inserts a set of descriptions of meta data terms. These will be displayed
below the scatter plot when a meta data term is clicked. All keys in the term dict
must occur as meta data.
Parameters
----------
term_dict: dict {metadataname: str: 'explanation to insert', ...}
Returns
-------
self: ScatterChart
'''
assert type(term_dict) == dict
if not self.term_doc_matrix.metadata_in_use():
raise TermDocMatrixHasNoMetadataException("No metadata is present in the term document matrix")
# This doesn't seem necessary. If a definition's not in the corpus, it just won't be shown.
# if set(term_dict.keys()) - set(self.term_doc_matrix.get_metadata()) != set():
# raise Exception('The following meta data terms are not present: '
# + ', '.join(list(set(term_dict.keys()) - set(self.term_doc_matrix.get_metadata()))))
if sys.version_info[0] == 2:
assert set([type(v) for v in term_dict.values()]) - set([str, unicode]) == set()
else:
assert set([type(v) for v in term_dict.values()]) - set([str]) == set()
self.metadata_descriptions = term_dict
return self
def inject_term_colors(self, term_to_color_dict):
'''
:param term_to_color_dict: dict, mapping a term to a color
:return: self
'''
        self.term_colors = term_to_color_dict
        return self
def inject_coordinates(self,
x_coords,
y_coords,
rescale_x=None,
rescale_y=None,
original_x=None,
original_y=None):
'''
Inject custom x and y coordinates for each term into chart.
Parameters
----------
x_coords: array-like
positions on x-axis \in [0,1]
y_coords: array-like
positions on y-axis \in [0,1]
rescale_x: lambda list[0,1]: list[0,1], default identity
Rescales x-axis after filtering
rescale_y: lambda list[0,1]: list[0,1], default identity
Rescales y-axis after filtering
original_x : array-like, optional
Original, unscaled x-values. Defaults to x_coords
original_y : array-like, optional
Original, unscaled y-values. Defaults to y_coords
Returns
-------
self: ScatterChart
'''
self._verify_coordinates(x_coords, 'x')
self._verify_coordinates(y_coords, 'y')
self.x_coords = x_coords
self.y_coords = y_coords
self._rescale_x = rescale_x
self._rescale_y = rescale_y
self.original_x = x_coords if original_x is None else original_x
        self.original_y = y_coords if original_y is None else original_y
        return self
def _verify_coordinates(self, coords, name):
if self.scatterchartdata.use_non_text_features and len(coords) != len(self.term_doc_matrix.get_metadata()):
raise CoordinatesNotRightException("Length of %s_coords must be the same as the number "
"of non-text features in the term_doc_matrix." % (name))
if not self.scatterchartdata.use_non_text_features and len(coords) != self.term_doc_matrix.get_num_terms():
raise CoordinatesNotRightException("Length of %s_coords must be the same as the number "
"of terms in the term_doc_matrix." % (name))
if max(coords) > 1:
raise CoordinatesNotRightException("Max value of %s_coords must be <= 1." % (name))
if min(coords) < 0:
raise CoordinatesNotRightException("Min value of %s_coords must be >= 0." % (name))
def hide_terms(self, terms):
'''
Mark terms which won't be displayed in the visualization.
:param terms: iter[str]
Terms to mark as hidden.
:return: ScatterChart
'''
self.hidden_terms = set(terms)
return self
def to_dict(self,
category,
category_name=None,
not_category_name=None,
scores=None,
transform=percentile_alphabetical,
title_case_names=False,
not_categories=None,
neutral_categories=None,
extra_categories=None,
background_scorer=None,
use_offsets=False,
**kwargs):
'''
Parameters
----------
category : str
Category to annotate. Exact value of category.
category_name : str, optional
Name of category which will appear on web site. Default None is same as category.
not_category_name : str, optional
Name of ~category which will appear on web site. Default None is same as "not " + category.
scores : np.array, optional
Scores to use for coloring. Defaults to None, or RankDifference scores
transform : function, optional
            Function for ranking terms. Defaults to scattertext.Scalers.percentile_alphabetical.
title_case_names : bool, default False
Title case category name and no-category name?
not_categories : list, optional
List of categories to use as "not category". Defaults to all others.
neutral_categories : list, optional
List of categories to use as neutral. Defaults [].
extra_categories : list, optional
List of categories to use as extra. Defaults [].
background_scorer : CharacteristicScorer, optional
Used for bg scores
Returns
-------
Dictionary that encodes the scatter chart
information. The dictionary can be dumped as a json document, and
used in scattertext.html
{info: {category_name: ..., not_category_name},
data: [{term:,
x:frequency [0-1],
y:frequency [0-1],
ox: score,
oy: score,
s: score,
os: original score,
p: p-val,
cat25k: freq per 25k in category,
cat: count in category,
ncat: count in non-category,
catdocs: [docnum, ...],
ncatdocs: [docnum, ...]
ncat25k: freq per 25k in non-category}, ...]}}
'''
if self.used:
raise Exception("Cannot reuse a ScatterChart constructor")
        if kwargs != {} and self.verbose:
logging.info("Excessive arguments passed to ScatterChart.to_dict: " + str(kwargs))
all_categories = self.term_doc_matrix.get_categories()
assert category in all_categories
if not_categories is None:
not_categories = [c for c in all_categories if c != category]
neutral_categories = []
extra_categories = []
elif neutral_categories is None:
neutral_categories = [c for c in all_categories
if c not in [category] + not_categories]
extra_categories = []
elif extra_categories is None:
extra_categories = [c for c in all_categories
if c not in [category] + not_categories + neutral_categories]
all_categories = [category] + not_categories + neutral_categories + extra_categories
df = self._get_term_category_frequencies()
self._add_x_and_y_coords_to_term_df_if_injected(df)
if scores is None:
scores = self._get_default_scores(category, not_categories, df)
category_column_name = category + ' freq'
df['category score'] = CornerScore.get_scores_for_category(
df[category_column_name],
df[[c + ' freq' for c in not_categories]].sum(axis=1)
)
if self.scatterchartdata.term_significance is not None:
df['p'] = get_p_vals(df, category_column_name,
self.scatterchartdata.term_significance)
df['not category score'] = CornerScore.get_scores_for_category(
df[[c + ' freq' for c in not_categories]].sum(axis=1),
df[category_column_name]
)
df['color_scores'] = scores
if self.scatterchartdata.terms_to_include is None:
df = self._filter_bigrams_by_minimum_not_category_term_freq(
category_column_name, not_categories, df)
df = filter_bigrams_by_pmis(
self._filter_by_minimum_term_frequency(all_categories, df),
threshold_coef=self.scatterchartdata.pmi_threshold_coefficient
)
if self.scatterchartdata.filter_unigrams:
df = filter_out_unigrams_that_only_occur_in_one_bigram(df)
if len(df) == 0:
raise NoWordMeetsTermFrequencyRequirementsError()
df['category score rank'] = rankdata(df['category score'], method='ordinal')
df['not category score rank'] = rankdata(df['not category score'], method='ordinal')
if self.scatterchartdata.max_terms and self.scatterchartdata.max_terms < len(df):
assert self.scatterchartdata.max_terms > 0
df = self._limit_max_terms(category, df)
df = df.reset_index()
if self.x_coords is None:
self.x_coords, self.y_coords = self._get_coordinates_from_transform_and_jitter_frequencies \
(category, df, not_categories, transform)
df['x'], df['y'] = self.x_coords, self.y_coords
df['ox'], df['oy'] = self.x_coords, self.y_coords
df['not cat freq'] = df[[x + ' freq' for x in not_categories]].sum(axis=1)
if neutral_categories != []:
df['neut cat freq'] = df[[x + ' freq' for x in neutral_categories]].sum(axis=1).fillna(0)
if extra_categories != []:
df['extra cat freq'] = df[[x + ' freq' for x in extra_categories]].sum(axis=1).fillna(0)
json_df = df[['x', 'y', 'ox', 'oy', 'term']]
if self.scatterchartdata.term_significance:
json_df['p'] = df['p']
self._add_term_freq_to_json_df(json_df, df, category)
json_df['s'] = self.scatterchartdata.score_transform(df['color_scores'])
json_df['os'] = df['color_scores']
if background_scorer:
bg_scores = background_scorer.get_scores(self.term_doc_matrix)
json_df['bg'] = bg_scores[1].loc[json_df.term].values
elif not self.scatterchartdata.use_non_text_features:
json_df['bg'] = self._get_corpus_characteristic_scores(json_df)
self._preform_axis_rescale(json_df, self._rescale_x, 'x')
self._preform_axis_rescale(json_df, self._rescale_y, 'y')
if self.scatterchartdata.terms_to_include is not None:
json_df = self._use_only_selected_terms(json_df)
category_terms = list(json_df.sort_values('s', ascending=False)['term'][:10])
not_category_terms = list(json_df.sort_values('s', ascending=True)['term'][:10])
if category_name is None:
category_name = category
if not_category_name is None:
not_category_name = 'Not ' + category_name
def better_title(x):
if title_case_names:
return ' '.join([t[0].upper() + t[1:].lower() for t in x.split()])
else:
return x
j = {'info': {'category_name': better_title(category_name),
'not_category_name': better_title(not_category_name),
'category_terms': category_terms,
'not_category_terms': not_category_terms,
'category_internal_name': category,
'not_category_internal_names': not_categories,
'categories': self.term_doc_matrix.get_categories(),
'neutral_category_internal_names': neutral_categories,
'extra_category_internal_names': extra_categories}}
if self.metadata_term_lists is not None:
j['metalists'] = self.metadata_term_lists
if self.metadata_descriptions is not None:
j['metadescriptions'] = self.metadata_descriptions
if self.term_colors is not None:
j['info']['term_colors'] = self.term_colors
# j['data'] = json_df.sort_values(by=['x', 'y', 'term']).to_dict(orient='records')
j['data'] = json_df.to_dict(orient='records')
if self.hidden_terms is not None:
for term_obj in j['data']:
if term_obj['term'] in self.hidden_terms:
term_obj['display'] = False
if use_offsets:
j['offsets'] = self.term_doc_matrix.get_offsets()
return j
def _add_x_and_y_coords_to_term_df_if_injected(self, df):
if self.x_coords is not None:
df['x'] = self.x_coords
df['y'] = self.y_coords
        if self.original_x is not None:
try:
df['ox'] = self.original_x.values
except AttributeError:
df['ox'] = self.original_x
        if self.original_y is not None:
try:
df['oy'] = self.original_y.values
except AttributeError:
df['oy'] = self.original_y
def _get_term_category_frequencies(self):
return self.term_doc_matrix.get_term_category_frequencies(self.scatterchartdata)
def _use_only_selected_terms(self, json_df):
term_df = pd.DataFrame({"term": self.scatterchartdata.terms_to_include})
return pd.merge(json_df, term_df, on='term', how='inner')
def _preform_axis_rescale(self, json_df, rescaler, variable_to_rescale):
if rescaler is not None:
json_df[variable_to_rescale] = rescaler(json_df[variable_to_rescale])
assert json_df[variable_to_rescale].min() >= 0 and json_df[variable_to_rescale].max() <= 1
def _get_corpus_characteristic_scores(self, json_df):
bg_terms = self.term_doc_matrix.get_scaled_f_scores_vs_background()
bg_terms = bg_terms['Scaled f-score']
bg_terms.name = 'bg'
bg_terms = bg_terms.reset_index()
bg_terms.columns = ['term' if x in ['index', 'word'] else x for x in bg_terms.columns]
json_df = pd.merge(json_df, bg_terms, on='term', how='left')
return json_df['bg'].fillna(0)
def _add_term_freq_to_json_df(self, json_df, term_freq_df, category):
json_df['cat25k'] = (((term_freq_df[category + ' freq'] * 1.
/ term_freq_df[category + ' freq'].sum()) * 25000).fillna(0)
.apply(np.round).astype(np.int))
json_df['ncat25k'] = (((term_freq_df['not cat freq'] * 1.
/ term_freq_df['not cat freq'].sum()) * 25000).fillna(0)
.apply(np.round).astype(np.int))
if 'neut cat freq' in term_freq_df:
json_df['neut25k'] = (((term_freq_df['neut cat freq'] * 1.
/ term_freq_df['neut cat freq'].sum()) * 25000).fillna(0)
.apply(np.round).astype(np.int))
json_df['neut'] = term_freq_df['neut cat freq']
else:
json_df['neut25k'] = 0
json_df['neut'] = 0
if 'extra cat freq' in term_freq_df:
json_df['extra25k'] = (((term_freq_df['extra cat freq'] * 1.
/ term_freq_df['extra cat freq'].sum()) * 25000).fillna(0)
.apply(np.round).astype(np.int))
json_df['extra'] = term_freq_df['extra cat freq']
else:
json_df['extra25k'] = 0
json_df['extra'] = 0
def _get_category_names(self, category):
other_categories = [val + ' freq' for val \
in self.term_doc_matrix.get_categories() \
if val != category]
all_categories = other_categories + [category + ' freq']
return all_categories, other_categories
def _get_coordinates_from_transform_and_jitter_frequencies(self,
category,
df,
other_categories,
transform):
not_counts = df[[c + ' freq' for c in other_categories]].sum(axis=1)
counts = df[category + ' freq']
x_data_raw = transform(not_counts, df.index, counts)
y_data_raw = transform(counts, df.index, not_counts)
x_data = self._add_jitter(x_data_raw)
y_data = self._add_jitter(y_data_raw)
return x_data, y_data
def _add_jitter(self, vec):
"""
:param vec: array to jitter
:return: array, jittered version of arrays
"""
if self.scatterchartdata.jitter == 0 or self.scatterchartdata.jitter is None:
return vec
return vec + np.random.rand(1, len(vec))[0] * self.scatterchartdata.jitter
def _term_rank_score_and_frequency_df(self, all_categories, category, other_categories, scores):
df = self._get_term_category_frequencies()
self._add_x_and_y_coords_to_term_df_if_injected(df)
if scores is None:
scores = self._get_default_scores(category, other_categories, df)
# np.array(self.term_doc_matrix.get_rudder_scores(category))
# convention_df['category score'] = np.array(self.term_doc_matrix.get_rudder_scores(category))
category_column_name = category + ' freq'
df['category score'] = CornerScore.get_scores_for_category(
df[category_column_name],
df[[c + ' freq' for c in other_categories]].sum(axis=1)
)
if self.scatterchartdata.term_significance is not None:
df['p'] = get_p_vals(df, category_column_name,
self.scatterchartdata.term_significance)
df['not category score'] = CornerScore.get_scores_for_category(
df[[c + ' freq' for c in other_categories]].sum(axis=1),
df[category_column_name]
)
df['color_scores'] = scores
if self.scatterchartdata.terms_to_include is None:
df = self._filter_bigrams_by_minimum_not_category_term_freq(
category_column_name, other_categories, df)
df = filter_bigrams_by_pmis(
self._filter_by_minimum_term_frequency(all_categories, df),
threshold_coef=self.scatterchartdata.pmi_threshold_coefficient
)
if self.scatterchartdata.filter_unigrams:
df = filter_out_unigrams_that_only_occur_in_one_bigram(df)
if len(df) == 0:
raise NoWordMeetsTermFrequencyRequirementsError()
df['category score rank'] = rankdata(df['category score'], method='ordinal')
df['not category score rank'] = rankdata(df['not category score'], method='ordinal')
if self.scatterchartdata.max_terms and self.scatterchartdata.max_terms < len(df):
assert self.scatterchartdata.max_terms > 0
df = self._limit_max_terms(category, df)
df = df.reset_index()
return df
def _filter_bigrams_by_minimum_not_category_term_freq(self, category_column_name, other_categories, df):
if self.scatterchartdata.terms_to_include is None:
return df[(df[category_column_name] > 0)
| (df[[c + ' freq' for c in other_categories]].sum(axis=1)
>= self.scatterchartdata.minimum_not_category_term_frequency)]
else:
return df
def _filter_by_minimum_term_frequency(self, all_categories, df):
if self.scatterchartdata.terms_to_include is None:
return df[df[[c + ' freq' for c in all_categories]].sum(axis=1)
> self.scatterchartdata.minimum_term_frequency]
else:
return df
def _limit_max_terms(self, category, df):
df['score'] = self._term_importance_ranks(category, df)
df = df.loc[df.sort_values('score').iloc[:self.scatterchartdata.max_terms].index]
return df[[c for c in df.columns if c != 'score']]
def _get_default_scores(self, category, other_categories, df):
category_column_name = category + ' freq'
cat_word_counts = df[category_column_name]
not_cat_word_counts = df[[c + ' freq' for c in other_categories]].sum(axis=1)
# scores = ScaledFScore.get_scores(cat_word_counts, not_cat_word_counts)
scores = RankDifference().get_scores(cat_word_counts, not_cat_word_counts)
return scores
def _term_importance_ranks(self, category, df):
return np.array([df['category score rank'], df['not category score rank']]).min(axis=0)
def draw(self,
category,
num_top_words_to_annotate=4,
words_to_annotate=[],
scores=None,
transform=percentile_alphabetical):
'''Outdated. MPLD3 drawing.
Parameters
----------
category
num_top_words_to_annotate
words_to_annotate
scores
transform
Returns
-------
        pd.DataFrame, html of figure
'''
try:
import matplotlib.pyplot as plt
        except ImportError:
raise Exception("matplotlib and mpld3 need to be installed to use this function.")
try:
from mpld3 import plugins, fig_to_html
        except ImportError:
            raise Exception("mpld3 needs to be installed to use this function.")
all_categories, other_categories = self._get_category_names(category)
df = self._term_rank_score_and_frequency_df(all_categories, category, other_categories, scores)
        if self.x_coords is None:
            self.x_coords, self.y_coords = \
                self._get_coordinates_from_transform_and_jitter_frequencies(
                    category, df, other_categories, transform)
            df['x'], df['y'] = self.x_coords, self.y_coords
df_to_annotate = df[(df['not category score rank'] <= num_top_words_to_annotate)
| (df['category score rank'] <= num_top_words_to_annotate)
| df['term'].isin(words_to_annotate)]
words = list(df['term'])
font = {'family': 'sans-serif',
'color': 'black',
'weight': 'normal',
'size': 'large'}
fig, ax = plt.subplots()
plt.figure(figsize=(10, 10))
plt.gcf().subplots_adjust(bottom=0.2)
plt.gcf().subplots_adjust(right=0.2)
points = ax.scatter(self.x_coords,
self.y_coords,
c=-df['color_scores'],
cmap='seismic',
s=10,
edgecolors='none',
alpha=0.9)
tooltip = plugins.PointHTMLTooltip(points,
['<span id=a>%s</span>' % w for w in words],
css='#a {background-color: white;}')
plugins.connect(fig, tooltip)
ax.set_ylim([-.2, 1.2])
ax.set_xlim([-.2, 1.2])
ax.xaxis.set_ticks([0., 0.5, 1.])
ax.yaxis.set_ticks([0., 0.5, 1.])
ax.set_ylabel(category.title() + ' Frequency Percentile', fontdict=font, labelpad=20)
ax.set_xlabel('Not ' + category.title() + ' Frequency Percentile', fontdict=font, labelpad=20)
for i, row in df_to_annotate.iterrows():
# alignment_criteria = row['category score rank'] < row['not category score rank']
alignment_criteria = i % 2 == 0
horizontalalignment = 'right' if alignment_criteria else 'left'
verticalalignment = 'bottom' if alignment_criteria else 'top'
term = row['term']
ax.annotate(term,
                        (self.x_coords[i], self.y_coords[i]),
size=15,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
)
# texts.append(
# ax.text(row['dem freq scaled'], row['rep freq scaled'], row['word'])
# )
# adjust_text(texts, arrowprops=dict(arrowstyle="->", color='r', lw=0.5))
plt.show()
return df, fig_to_html(fig)
def to_dict_without_categories(self):
if self.y_coords is None or self.x_coords is None or self.original_x is None or self.original_y is None:
raise NeedToInjectCoordinatesException(
"This function requires you run inject_coordinates."
)
json_df = (self.term_doc_matrix
.get_term_count_df()
.rename(columns={'corpus': 'cat'}))
json_df['cat25k'] = (((json_df['cat'] * 1.
/ json_df['cat'].sum()) * 25000)
.apply(np.round).astype(np.int))
self._add_x_and_y_coords_to_term_df_if_injected(json_df)
j = {}
j['data'] = json_df.reset_index().sort_values(by=['x', 'y', 'term']).to_dict(orient='records')
return j
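# A minimal end-to-end sketch (hedged): `corpus` is assumed to be an already-built
# scattertext term-document matrix with a 'democrat' category, and
# `minimum_term_frequency` is assumed to be a valid ScatterChartData keyword.
# Wrapped in a function so nothing runs at import time.
def _example_scatter_chart(corpus):
    chart = ScatterChart(corpus, minimum_term_frequency=3)
    chart_dict = chart.to_dict(category='democrat',
                               category_name='Democratic',
                               not_category_name='Republican')
    return chart_dict['info']['category_terms']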
|
apache-2.0
|
cdegroc/scikit-learn
|
sklearn/datasets/mldata.py
|
2
|
6501
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: Simplified BSD
import os
from os.path import join, exists
import re
from scipy import io
from shutil import copyfileobj
import urllib2
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
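# A quick illustration of the normalization above (example name only): spaces become
# dashes, parentheses and dots are stripped, and the result is lower-cased.
def _example_mldata_filename():
    return mldata_filename('datasets-UCI iris (v1.0)') # 'datasets-uci-iris-v10'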
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
    Keyword arguments make it possible to adapt these defaults to specific
    data sets (see parameters `target_name`, `data_name`, `transpose_data`,
    and the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> iris = fetch_mldata('iris')
>>> iris.target[0]
1
>>> print iris.data[0]
[-0.555556 0.25 -0.864407 -0.916667]
Load the 'leukemia' dataset from mldata.org, which respects the
sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=False)
>>> print leuk.data.shape[0]
7129
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0')
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % urllib2.quote(dataname)
try:
mldata_url = urllib2.urlopen(urlname)
except urllib2.HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
    # if target or data names are indices, transform them into names
if isinstance(target_name, int):
target_name = col_names[target_name]
if isinstance(data_name, int):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
|
bsd-3-clause
|
yufengg/tensorflow
|
tensorflow/python/estimator/canned/dnn_test.py
|
20
|
16058
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
yaojenkuo/BuildingMachineLearningSystemsWithPython
|
ch02/heldout.py
|
24
|
1377
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script demonstrates the difference between the training accuracy and
# testing (held-out) accuracy.
import numpy as np
from sklearn.datasets import load_iris
from threshold import fit_model, accuracy
data = load_iris()
features = data['data']
labels = data['target_names'][data['target']]
# We are going to remove the setosa examples as they are too easy:
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
# Now we classify virginica vs non-virginica
is_virginica = (labels == 'virginica')
# Split the data in two: testing and training
testing = np.tile([True, False], 50) # testing = [True,False,True,False,True,False...]
# Training is the negation of testing: i.e., datapoints not used for testing,
# will be used for training
training = ~testing
model = fit_model(features[training], is_virginica[training])
train_accuracy = accuracy(features[training], is_virginica[training], model)
test_accuracy = accuracy(features[testing], is_virginica[testing], model)
print('''\
Training accuracy was {0:.1%}.
Testing accuracy was {1:.1%} (N = {2}).
'''.format(train_accuracy, test_accuracy, testing.sum()))
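# A hedged extension of the split above: swapping the roles of the two halves and
# averaging the two held-out accuracies gives a simple 2-fold estimate. Wrapped in a
# function so the script's printed output is unchanged unless this is called.
def twofold_heldout_accuracy():
    acc_a = accuracy(features[testing], is_virginica[testing],
                     fit_model(features[training], is_virginica[training]))
    acc_b = accuracy(features[training], is_virginica[training],
                     fit_model(features[testing], is_virginica[testing]))
    return (acc_a + acc_b) / 2.0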
|
mit
|
davidpvilaca/TEP
|
aula8/tarefa2.py
|
1
|
4118
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 23:21:28 2017
@author: davidpvilaca
"""
import matplotlib.pyplot as plt
import numpy as np
import cv2
DIGITS_LOOKUP = {
#0 1 2 3 4 5 6
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(0, 1, 0, 0, 1, 0, 0): 1,
(1, 0, 1, 1, 1, 0, 1): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(1, 0, 0, 1, 0, 0, 1): 3,
(1, 0, 1, 1, 0, 0, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 0, 1, 0, 0, 0, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9,
(1, 1, 1, 1, 0, 1, 0): 9
}
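# The tuple positions above follow the seven-segment order used later in segmentDigits:
# 0=top, 1=top-left, 2=top-right, 3=center, 4=bottom-left, 5=bottom-right, 6=bottom.
# A small sanity check (illustrative only):
def _example_digit_lookup():
    return DIGITS_LOOKUP[(1, 1, 0, 1, 0, 1, 1)] # 5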
def segmentDigits(path, typeThresh = cv2.THRESH_BINARY):
img_o = cv2.imread(path)
img = cv2.cvtColor(img_o.copy(), cv2.COLOR_BGR2GRAY)
thresh1 = cv2.threshold(img, 0, 255, typeThresh + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
    thresh2 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel) # opening
    closing = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel) # closing
cnts = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if w >= 10 and (h >= 3):
digitCnts.append(c)
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
#plt.imshow(img, cmap=plt.cm.Greys_r)
digits = []
rois_w_avg = 0
# loop over each of the digits
for c in digitCnts:
# extract the digit ROI
(x, y, w, h) = cv2.boundingRect(c)
roi = thresh2[y:y + h, x:x + w]
#plt.figure()
#plt.imshow(roi, cmap=plt.cm.Greys_r)
# compute the width and height of each of the 7 segments
(roiH, roiW) = roi.shape
(dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
dHC = int(roiH * 0.05)
digits.append(roi)
segments = [((0, 0), (w, dH)), # top
((0, 0), (dW, h // 2)), # top-left
((w - dW, 0), (w, h // 2)), # top-right
((0, (h // 2) - dHC) , (w, (h // 2) + dHC)), # center
((0, h // 2), (dW, h)), # bottom-left
((w - dW, h // 2), (w, h)), # bottom-right
((0, h - dH), (w, h)) # bottom
]
on = [0] * len(segments)
# loop over the segments
for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
segROI = roi[yA:yB, xA:xB]
total = cv2.countNonZero(segROI)
area = (xB - xA) * (yB - yA)
            # if the fraction of non-zero pixels is at least 40% of the
            # segment area, mark the segment as "on"
if (total / float(area)) >= 0.4:
on[i]= 1
# lookup the digit and draw it on the image
lookup = tuple(on)
digit = DIGITS_LOOKUP[lookup] if lookup in DIGITS_LOOKUP else ''
if ( rois_w_avg != 0 and (w/rois_w_avg) < 0.5 ):
digit = 1
else:
rois_w_avg = h if rois_w_avg == 0 else np.average([rois_w_avg, w])
digits.append(digit)
#print(str(lookup) + ': ' + str(digit))
#print(tuple(on))
#print(digit)
cv2.rectangle(img_o, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.putText(img_o, str(digit), (x, y +20),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
plt.figure()
plt.imshow(cv2.cvtColor(img_o, cv2.COLOR_BGR2RGB))
def main():
inputs = [
{ 'path': 'ex.png', 'type': cv2.THRESH_BINARY_INV },
{ 'path': 'ex2.png', 'type': cv2.THRESH_BINARY },
{ 'path': 'ex3.png', 'type': cv2.THRESH_BINARY }
]
for i in inputs:
segmentDigits(i['path'], i['type'])
return 0
if __name__ == '__main__':
main()
|
mit
|
sameersingh/ml-discussions
|
week4/mltools/datagen.py
|
1
|
4337
|
import numpy as np
from numpy import loadtxt as loadtxt
from numpy import asarray as arr
from numpy import asmatrix as mat
from numpy import atleast_2d as twod
from scipy.linalg import sqrtm
################################################################################
## Methods for creating / sampling synthetic datasets ##########################
################################################################################
def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):
"""Sample data from a two-component Gaussian mixture model.
Args:
N0 (int): Number of data to sample for class -1.
        N1 (int): Number of data to sample for class 1.
        mu0 (arr): mean of the class -1 Gaussian.
        mu1 (arr): mean of the class 1 Gaussian.
        sig0 (arr): covariance of the class -1 Gaussian.
        sig1 (arr): covariance of the class 1 Gaussian.
Returns:
X (array): Array of sampled data
Y (array): Array of class values that correspond to the data points in X.
TODO: test more
"""
if not N1:
N1 = N0
d1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]
if d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):
raise ValueError('data_gauss: dimensions should agree')
X0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))
X0 += np.ones((N0,1)) * mu0
Y0 = -np.ones(N0)
X1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))
X1 += np.ones((N1,1)) * mu1
Y1 = np.ones(N1)
X = np.row_stack((X0,X1))
Y = np.concatenate((Y0,Y1))
return X,Y
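# A minimal usage sketch (illustrative parameter values only):
def _example_data_gauss():
    X, Y = data_gauss(100, 100, mu0=arr([0, 0]), mu1=arr([2, 2]))
    return X.shape, Y.shape # ((200, 2), (200,))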
def data_GMM(N, C, D=2, get_Z=False):
"""Sample data from a Gaussian mixture model.
Builds a random GMM with C components and draws M data x^{(i)} from a mixture
of Gaussians in D dimensions
Args:
N (int): Number of data to be drawn from a mixture of Gaussians.
C (int): Number of clusters.
D (int): Number of dimensions.
        get_Z (bool): If True, also returns an array indicating the cluster from which each
data point was drawn.
Returns:
X (arr): N x D array of data.
        Z (arr): 1 x N array of cluster ids; returned only if get_Z=True.
TODO: test more; N vs M
"""
C += 1
pi = np.zeros(C)
for c in range(C):
pi[c] = gamrand(10, 0.5)
pi = pi / np.sum(pi)
cpi = np.cumsum(pi)
rho = np.random.rand(D, D)
rho = rho + twod(rho).T
rho = rho + D * np.eye(D)
rho = sqrtm(rho)
mu = mat(np.random.randn(c, D)) * mat(rho)
ccov = []
for i in range(C):
tmp = np.random.rand(D, D)
tmp = tmp + tmp.T
tmp = 0.5 * (tmp + D * np.eye(D))
ccov.append(sqrtm(tmp))
p = np.random.rand(N)
Z = np.ones(N)
for c in range(C - 1):
Z[p > cpi[c]] = c
Z = Z.astype(int)
X = mu[Z,:]
for c in range(C):
X[Z == c,:] = X[Z == c,:] + mat(np.random.randn(np.sum(Z == c), D)) * mat(ccov[c])
if get_Z:
return (arr(X),Z)
else:
return arr(X)
def gamrand(alpha, lmbda):
"""Gamma(alpha, lmbda) generator using the Marsaglia and Tsang method
Args:
alpha (float): scalar
lambda (float): scalar
Returns:
(float) : scalar
TODO: test more
"""
# (algorithm 4.33).
if alpha > 1:
d = alpha - 1 / 3
c = 1 / np.sqrt(9 * d)
flag = 1
while flag:
Z = np.random.randn()
if Z > -1 / c:
V = (1 + c * Z)**3
U = np.random.rand()
flag = np.log(U) > (0.5 * Z**2 + d - d * V + d * np.log(V))
return d * V / lmbda
else:
x = gamrand(alpha + 1, lmbda)
return x * np.random.rand()**(1 / alpha)
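# A quick sanity check (hedged): with this rate parameterization the sample mean of
# Gamma(alpha, lmbda) draws should be close to alpha / lmbda (roughly 20 here).
def _example_gamrand_mean(n=10000, alpha=10.0, lmbda=0.5):
    return np.mean([gamrand(alpha, lmbda) for _ in range(n)])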
def data_mouse():
"""Simple by-hand data generation using the GUI
Opens a matplotlib plot window, and allows the user to specify points with the mouse.
Each button is its own class (1,2,3); close the window when done creating data.
Returns:
X (arr): Mx2 array of data locations
Y (arr): Mx1 array of labels (buttons)
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))
X = np.zeros( (0,2) )
Y = np.zeros( (0,) )
col = ['bs','gx','ro']
def on_click(event):
X.resize( (X.shape[0]+1,X.shape[1]) )
X[-1,:] = [event.xdata,event.ydata]
Y.resize( (Y.shape[0]+1,) )
Y[-1] = event.button
ax.plot( event.xdata, event.ydata, col[event.button-1])
fig.canvas.draw()
fig.canvas.mpl_connect('button_press_event',on_click)
inter=plt.isinteractive()
hld=plt.ishold()
plt.ioff()
plt.hold(True)
plt.show();
if inter: plt.ion();
if not hld: plt.hold(False);
return X,Y
|
apache-2.0
|
brianlorenz/COSMOS_IMACS_Redshifts
|
PlotCodes/Plot_Re_lmass_V2.py
|
1
|
12389
|
#Creates a UVJ diagram split by good, low, and bad measurements for specified lines
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from astropy.cosmology import WMAP9 as cosmo
from astropy.stats import biweight_midvariance
from scipy.optimize import curve_fit
#import lnr
#Folder to save the figures
figout = '/Users/galaxies-air/COSMOS/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/lineflux_red.txt'
#Location of the equivalent width data
#ewdata = '/Users/galaxies-air/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
#ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
#File with the error array
errdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/errs.txt'
#Read in the error array
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#File with the error array
errreddatapath = '/Users/galaxies-air/COSMOS/COSMOSData/errs_red.txt'
#Read in the scale of the lines
err_dfred = ascii.read(errreddatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#File with the structural properties
spropdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/struct_prop.txt'
#Read in the scale of the lines
sprop_df = ascii.read(spropdatapath).to_pandas()
sprop_df = sprop_df.rename(columns={'id':'OBJID'})
fluxdata = pd.merge(fluxdata,sprop_df)
#The location with the file for the filter data
filtdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/all_c_hasinger.txt'
#Read in the data
filtdata = ascii.read(filtdatapath).to_pandas()
cordata = filtdata[['id','Ks','eKs','Ks_tot','eKs_tot']]
cordata = cordata.rename(columns={'id':'OBJID'})
fluxdata = pd.merge(fluxdata,cordata,on='OBJID',how='inner')
fluxdata = fluxdata.drop_duplicates()
fluxdata = fluxdata.reset_index()
#Read in the sfr file
sfdata = '/Users/galaxies-air/COSMOS/COSMOSData/sfrs.txt'
sfr_df = ascii.read(sfdata).to_pandas()
fluxdata = pd.merge(fluxdata,sfr_df,on='fluxfile')
#Fontsizes for plotting
axisfont = 24
ticksize = 18
ticks = 8
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
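#A small check of divz (illustrative values only): entries with a zero denominator come
#back as 0 instead of producing a divide-by-zero warning.
def _example_divz():
    return divz(np.array([1.0, 2.0, 3.0]), np.array([2.0, 0.0, 3.0])) # [0.5, 0.0, 1.0]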
HbHg = 0
HaHg = 0
if HbHg: lines = ['4861']
elif HaHg: lines = ['6563_fix']
else: lines=['6563_fix']
'''
#Set low objects to an upper limit
for line in lines:
for i in range(0,len(fluxdata)):
if (fluxdata.iloc[i][line+'_flux'] == 0) and (dataqual[line+'_low']=='True'):
print 'Fixing'
fluxdata.at[line+'_flux',i] = err_df.iloc[i][line+'_err']
'''
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in a line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
dupids = [item for item, count in collections.Counter(fluxdata[allgood]['OBJID']).items() if count > 1]
dupobjsarr = []
weight_df = pd.DataFrame()
weight_df['fluxfile'] = fluxdata.fluxfile
for i in range(len(fluxdata)):
weight_df.at[i,'Weight'] = 1.0
for obj in dupids:
dupobjsarr.append(fluxdata[fluxdata.OBJID == obj].OBJID)
for i in range(0,len(dupids)):
ndup = len(dupobjsarr[i])
for j in range(0,ndup):
weight_df.at[dupobjsarr[i].index[j],'Weight'] = 1.0/ndup
fluxdata = pd.merge(fluxdata,weight_df,on='fluxfile')
combinemass = 1
paper = 0
showkel = 0
shows = 0
cutofflow = 0
showmeds = 0
filtSFR = fluxdata['SFR']<10000000
ms=12
lwbw=2
notbad = np.logical_not(baddata)
ssfr = 1
if not paper:
fig = plt.figure(figsize = (19.5,7))
ax = fig.add_axes((0.15,0.15,0.315,0.8))
else: fig,ax = plt.subplots(figsize = (8,7))
c=0
msbw = 12
lwbw = 3
colormed2 = 'black'
key = ''
if HbHg: key = '_HbHg'
elif HaHg: key = '_HaHg'
for w in range(0,2):
#llim1 = (np.log10(fluxdata['sSFR'+key]) > -10.7)
#if cutofflow: ax.plot((0,20),(-10.7,-10.7),ls='--',color='black',label='sSFR cutoff for completeness')
if cutofflow: ax.axhspan(-10.7,-15, color='indianred', alpha=0.1,label='Incomplete, discarding for analysis')
if c in [0,3]:
col = 'good'
filt = allgood
color='cornflowerblue'
mark2 = 'o'
label2 = 'Significant H$\\alpha$ detection'
elif c in [1,4]:
col = 'low'
filt = somelow
color='cornflowerblue'
mark2 = 'v'
label2 = '5$\\sigma$ Upper limit on sSFR'
else:
col = 'bad'
filt = baddata
color='red'
filt = np.logical_and(filt,filtSFR)
xdata = fluxdata[filt]['re_kpc']
ydata = np.log10(fluxdata[filt]['SFR'+key])
mdata = fluxdata[filt]['LMASS']
ax.set_xlabel('log(Stellar Mass) (M$_\odot$)',fontsize = axisfont)
ax.set_ylabel('log(SFR) (M$_\odot$/yr)',fontsize = axisfont)
if ssfr:
ydata = np.log10(fluxdata[filt]['sSFR'+key])
#Upper error
yerru = np.log10(fluxdata[filt]['sSFR']+fluxdata[filt]['ssfr_err_u'])-np.log10(fluxdata[filt]['sSFR'])
#If lower error is 0 or negative, set it to be very large
ldiff = fluxdata[filt]['sSFR']-fluxdata[filt]['ssfr_err_d']
ldiff.loc[ldiff<=0] = 10000
yerrd = np.abs(np.log10(fluxdata[filt]['sSFR'])-np.log10(ldiff))
ax.set_ylabel('log(sSFR) (yr$^{-1}$)',fontsize = axisfont)
kelmodelcolor = 'orange'
kelmodelw = 4
kelz = 100
if (c==0 and showkel): pkel = ax.plot((-100,100),(-9.46,-9.46),color=kelmodelcolor,ls='-',label='Model (Kelson 2014)',zorder=kelz,lw=kelmodelw)
if (c==0 and showkel): ax.plot((-100,100),(-9.86,-9.86),color=kelmodelcolor,ls='--',label=None,zorder=kelz,lw=kelmodelw)
if (c==0 and showkel): ax.plot((-100,100),(-9.06,-9.06),color=kelmodelcolor,ls='--',label=None,zorder=kelz,lw=kelmodelw)
smodelcolor = 'orange'
smodelw = 4
sz = 100
x2 = np.arange(9,9.4,0.01)
x2b = np.arange(9.4,10,0.01)
y2 = -0.17*(x2-10)-9.65
y2b = -0.53*(x2b-10)-9.87
if (c==0 and shows): psal = ax.plot(x2,y2,color=smodelcolor,ls='-',label='Fit to SDSS z<0.1 Galaxies (Salim+ 2007)',zorder=sz,lw=smodelw)
if (c==0 and shows): ax.plot(x2b,y2b,color=smodelcolor,ls='-',label=None,zorder=sz,lw=smodelw)
fluxdata['lsSFR'] = np.log10(fluxdata['sSFR'+key])
mr1 = (fluxdata[notbad]['LMASS']<9.25)
mr2 = np.logical_and(fluxdata[notbad]['LMASS']>=9.25,fluxdata[notbad]['LMASS']<9.5)
mr3 = np.logical_and(fluxdata[notbad]['LMASS']>=9.5,fluxdata[notbad]['LMASS']<9.75)
mr4 = (fluxdata[notbad]['LMASS']>=9.75)
mrs = [mr1,mr2,mr3,mr4]
if cutofflow:
llim = (np.log10(fluxdata[notbad]['sSFR']) > -10.7)
mrs = [np.logical_and(i,llim) for i in mrs]
def getWmed(fluxdata, mr):
sflux = fluxdata[notbad][mr].sort_values('lsSFR')
cumsum = sflux.Weight.cumsum()
cutoff = sflux.Weight.sum()/2.0
median = sflux.lsSFR[cumsum>=cutoff].iloc[0]
return median
def geteWmed(fluxdata, mr):
sflux = fluxdata.sort_values('sSFR')
cumsum = sflux.Weight.cumsum()
cutoff = sflux.Weight.sum()/2.0
median = sflux.sSFR[cumsum>=cutoff].iloc[0]
fluxdata['absSFR']=np.abs(fluxdata['sSFR']-median)
sflux = fluxdata[notbad][mr].sort_values('absSFR')
cumsum = sflux.Weight.cumsum()
cutoff = sflux.Weight.sum()/2.0
median = sflux.absSFR[cumsum>=cutoff].iloc[0]
return median
meds = np.array([getWmed(fluxdata,i) for i in mrs])
emeds = 1.49*np.array([geteWmed(fluxdata,i) for i in mrs])
emeds = (emeds/np.median(10**fluxdata['lsSFR'][notbad]))/2.303
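#1.49*MAD approximates one sigma for a Gaussian; dividing by the sample median
#sSFR and by ln(10)~2.303 turns that spread into an uncertainty in dex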
bins = np.array([9.125,9.375,9.625,9.875])
#bins=(np.arange(1,17,2)/16.0)+9
msbw = 12
lwbw = 3
colormed2 = 'black'
if c==0:
if showmeds: pmed = ax.errorbar(bins,meds,yerr=emeds,marker='o',ms=msbw,lw=lwbw,ls='None',zorder=1000,markerfacecolor='None', markeredgecolor=colormed2,mew=3,ecolor=colormed2,label='Median in bin, log(sSFR)>-10.7')
def linefit(x,m,b):
y=m*x+b
return y
coeff,pcov = curve_fit(linefit,bins,meds,sigma=np.array(emeds)/100)
perr = np.sqrt(np.diag(pcov))
sbins = np.array([8.5,10.5])
if showmeds: ax.plot(sbins,linefit(sbins,coeff[0],coeff[1]),color='red',lw=4,ls='-',label='Fit to median',zorder=4)
if c==0:
#pcir = ax.errorbar(ydata,xdata,xerr=np.array([yerrd,yerru]),color=color,marker=mark2,ms=4,lw=0.5,ls='None',zorder=10,label='Significant H$\\alpha$ detection')
errfilt = yerru<0.1
pcirdark = ax.scatter(mdata[errfilt],xdata[errfilt],c=ydata[errfilt],label='Significant H$\\alpha$ detection (error <0.1 dex)')
else:
#ptri = ax.plot(ydata,xdata,color=color,marker=mark2,mfc='None',ms=6,lw=0.5,ls='None',zorder=10,label=label2)
#if HbHg: a = ax.plot((0,0),(0,0),color=color,marker='o',ms=4,lw=0.5,ls='None',zorder=1,label='Significant H$\\beta$ detection')
#else: a = ax.plot((0,0),(0,0),color=color,marker='o',ms=4,lw=0.5,ls='None',zorder=1,label='Significant H$\\alpha$ detection')
#b = ax.plot((0,0),(0,0),color=color,marker=mark2,mfc='None',ms=6,lw=0.5,ls='None',zorder=2,label=label2)
#if showmeds: c1 = ax.errorbar(0,0,yerr=0.4,marker='o',ms=msbw,lw=lwbw,ls='None',zorder=3,markerfacecolor='None', markeredgecolor=colormed2,mew=3,ecolor=colormed2,label='Median in bin, log(sSFR)>-10.7')
#if showkel: d = ax.plot((-100,0),(-9.46,-9.46),color=kelmodelcolor,ls='-',label='Model (Kelson 2014)',zorder=4,lw=kelmodelw)
#if shows: e = ax.plot((0,0),(1,1),color=smodelcolor,ls='-',label='Empirical Fit (Salim 2007)',zorder=5,lw=smodelw)
#if showmeds: f = ax.plot((0,0),(1,1),color='red',lw=4,ls='-',label='Fit to median',zorder=6)
handles, labels = ax.get_legend_handles_labels()
if not paper:
'''
hand = [a[0],b[0]]
if showmeds: hand.append(c1[0])
if showmeds: hand.append(f[0])
if showkel: hand.append(d[0])
if shows: hand.append(e[0])
'''
if (showmeds) and (showkel or shows): hand = [handles[-1],handles[-2],handles[2],handles[3],handles[5],handles[1],handles[0]]
elif (showmeds) and (cutofflow): hand = [handles[-1],handles[-2],handles[1],handles[3],handles[4],handles[0]]
elif (cutofflow): hand = [handles[-1],handles[-2],handles[0],handles[1]]
#else: hand = [handles[-1],handles[-2],handles[0]]
#ax.legend(handles=hand,fontsize=axisfont-2,bbox_to_anchor=(1.01, 0.5))
else:
pass
#hand = [handles[-1],handles[-2],handles[2],handles[3],handles[5],handles[1],handles[0]]
#ax.legend(handles=hand,fontsize=legendfont-6,loc=1,frameon=False)
ax.tick_params(labelsize = ticksize, size=ticks)
ax.set_xlim(8.95,10.05)
ax.set_ylim(0,12)
if ssfr:
if not paper:
if HbHg: ax.set_ylim(-13,-6)
else:
ax.set_xlim(8.95,10.05)
ax.set_ylim(0,12)
else:
ax.set_xlim(8.95,10.05)
ax.set_ylim(0,12)
c=c+1
fig.tight_layout()
if ssfr:
if HbHg: fig.savefig(figout + 'sSFR_Mass_HbHg.pdf')
elif HaHg: fig.savefig(figout + 'sSFR_Mass_HaHg.pdf')
else: fig.savefig(figout + 're_lmass_V2.pdf')
else: fig.savefig(figout + 'SFR_Mass.pdf')
plt.close(fig)
|
mit
|
bekkblando/tiy-project
|
bitcoin_trade/trade_engine/views.py
|
3
|
14875
|
from urllib.request import urlopen
import json
from django.contrib.auth.models import User
from matplotlib import pylab
from pylab import *
import numpy as np
import pandas as pd
from django_pandas.io import read_frame
from django.db.models import Avg
from django.shortcuts import redirect
from django.core.urlresolvers import reverse_lazy, reverse
from django.shortcuts import render, render_to_response
from django.http import HttpResponseRedirect
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from trade_engine.mixins import AddActiveOrderFormMixin
from trade_engine.models import UserAccount, DepositAddress, ActiveOrder, ActiveOrderTicker, Balance, BalanceTicker, Trade, TradeTicker, CancelOrder, CancelOrderTicker, TradeHistory, TradeHistoryTicker, TransHistory, TransHistoryTicker, WithdrawCoin, WithdrawTicker, Ticker, Depth
from django.contrib.auth.forms import UserCreationForm
from django.template import RequestContext
from trade_engine.forms import BalanceForm, ActiveOrderForm, TradeForm, CancelOrderForm, TradeHistoryForm, TransHistoryForm, WithdrawForm
from trade_engine.converter import *
def base(request):
context = {"balance_ticker": BalanceTicker.objects.all()[BalanceTicker.objects.count()-1],
"active_order_ticker": ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1],
"ticker": Ticker.objects.all()[Ticker.objects.count()-1],
"depth": Depth.objects.all()[Depth.objects.count()-1]}
return render_to_response("base.html", context, context_instance=RequestContext(request))
def indicators(request):
bull = False
null = False
bear = False
lastprice = Ticker.objects.all()[Ticker.objects.count()-1]
twoprice = Ticker.objects.all()[Ticker.objects.count()-2]
threeprice = Ticker.objects.all()[Ticker.objects.count()-3]
fourprice = Ticker.objects.all()[Ticker.objects.count()-4]
one = lastprice.last
two = twoprice.last
three = threeprice.last
four = fourprice.last
if one > two and one > three and one > four:
bull = True
null = False
bear = False
elif one < two and one < three and one < four:
bull = False
null = False
bear = True
else:
bull = False
null = True
bear = False
bull_long = False
null_long = False
bear_long = False
allpricemean = Ticker.objects.all().aggregate(Avg('last')).pop('last__avg', 0)
print(allpricemean)
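# Long-term heuristic: compare the latest price against a +/-5% band around the
# mean of all recorded ticker prices; above the lower bound is treated as
# bullish, otherwise below the upper bound as bearish, else neutral.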
if one > (allpricemean-(0.05*allpricemean)):
bull_long = True
null_long = False
bear_long = False
elif one < (allpricemean+(0.05*allpricemean)):
bull_long = False
null_long = False
bear_long = True
else:
bull_long = False
null_long = True
bear_long = False
context = {"bull": bull,
"null": null,
"bear": bear,
"bull_long": bull_long,
"null_long": null_long,
"bear_long": bear_long}
qs = Ticker.objects.all()
df = read_frame(qs, coerce_float=True).convert_objects(convert_numeric=True, convert_dates=True)
x = Depth.objects.all()[Depth.objects.count()-1].split_bids
list1_bids = []
list2_bids = []
for item in x:
itemlist = item.split(',')
list1_bids.append(float(itemlist[0]))
list2_bids.append(float(itemlist[1]))
y = Depth.objects.all()[Depth.objects.count()-1].split_asks
list1_asks = []
list2_asks = []
for item in y:
itemlist = item.split(',')
list1_asks.append(float(itemlist[0]))
list2_asks.append(float(itemlist[1]))
df1 = {'x': list1_bids,
'y': list2_bids}
df2 = {'x': list1_asks,
'y': list2_asks}
bids_frame = pd.DataFrame(df1)
asks_frame = pd.DataFrame(df2)
graph_one = scatter_to_base64(df, "plot_current_price")
graph_two = scatter_to_base64(df, "plot_high")
graph_low = scatter_to_base64(df, "plot_low")
graph_avg = scatter_to_base64(df, "plot_avg")
graph_vol = scatter_to_base64(df, "plot_vol")
graph_three = scatter_to_base64(bids_frame, "plot_bids")
graph_four = scatter_to_base64(asks_frame, "plot_asks")
context["graph_one"] = graph_one
context["graph_two"] = graph_two
context["graph_low"] = graph_low
context["graph_avg"] = graph_avg
context["graph_vol"] = graph_vol
context["graph_three"] = graph_three
context["graph_four"] = graph_four
return render_to_response("indicators.html", context, context_instance=RequestContext(request))
def user_registration(request):
if request.POST:
username = request.POST['username']
password1 = request.POST['password1']
password2 = request.POST['password2']
form = UserCreationForm({
'username': username,
'password1': password1,
'password2': password2,
})
try:
form.save(commit=True)
return HttpResponseRedirect("/")
except ValueError:
return render_to_response("registration/create_user.html",
{'form': form},
context_instance=RequestContext(request)
)
return render_to_response("registration/create_user.html",
{'form': UserCreationForm()},
context_instance=RequestContext(request)
)
def account_settings(request):
context = {"balance_ticker": BalanceTicker.objects.all()[BalanceTicker.objects.count()-1],
"active_order_ticker": ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1],
"ticker": Ticker.objects.all()[Ticker.objects.count()-1],
"depth": Depth.objects.all()[Depth.objects.count()-1]}
return render_to_response('account_settings.html', context, context_instance=RequestContext(request))
def history(request):
context = {"balance_ticker": BalanceTicker.objects.all()[BalanceTicker.objects.count()-1],
"active_order_ticker": ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1],
"ticker": Ticker.objects.all()[Ticker.objects.count()-1],
"depth": Depth.objects.all()[Depth.objects.count()-1]}
return render_to_response('history.html', context, context_instance=RequestContext(request))
def ticker_view(request):
btce_prices = urlopen('https://btc-e.com/api/2/btc_usd/ticker')
str_response = btce_prices.read().decode('utf-8')
btcejson = json.loads(str_response)
ticker_obj = btcejson['ticker']
Ticker.objects.create(**ticker_obj)
return redirect('base')
def depth_view(request):
btce_depth = urlopen('https://btc-e.com/api/3/depth/btc_usd')
str_response = btce_depth.read().decode('utf-8')
btcejson = json.loads(str_response)
depth_obj = btcejson['btc_usd']
print(depth_obj)
Depth.objects.create(**depth_obj)
return redirect('base')
class CreateBalanceFormView(CreateView):
model = Balance
template_name = 'base.html'
success_url = reverse_lazy('base')
form_class = BalanceForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
class CreateActiveOrderFormView(CreateView):
model = ActiveOrder
template_name = 'base.html'
success_url = reverse_lazy('base')
form_class = ActiveOrderForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
class CreateTradeFormView(CreateView):
model = Trade
template_name = 'trade.html'
success_url = reverse_lazy('base')
form_class = TradeForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateTradeFormView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class CreateCancelOrderView(CreateView):
model = CancelOrder
template_name = 'cancel_trade.html'
success_url = reverse_lazy('base')
form_class = CancelOrderForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateCancelOrderView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class CreateTradeHistoryView(CreateView):
model = TradeHistory
template_name = 'trade_history.html'
success_url = reverse_lazy('base')
form_class = TradeHistoryForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateTradeHistoryView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class CreateTransHistoryView(CreateView):
model = TransHistory
template_name = 'trans_history.html'
success_url = reverse_lazy('base')
form_class = TransHistoryForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateTransHistoryView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class CreateWithdrawCoinView(CreateView):
model = WithdrawCoin
template_name = 'withdraw_coin.html'
success_url = reverse_lazy('base')
form_class = WithdrawForm
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateWithdrawCoinView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class CreateUserAccountView(CreateView):
model = UserAccount
template_name = "create_user_account.html"
success_url = reverse_lazy('base')
fields = ["api_key", "secret"]
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateUserAccountView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class DeleteUserAccountView(DeleteView):
model = UserAccount
success_url = reverse_lazy('base')
class UpdateUserAccountView(UpdateView):
model = UserAccount
template_name = "update_user_account.html"
fields = ["api_key", "secret"]
success_url = reverse_lazy('base')
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(UpdateUserAccountView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class CreateUserAddressView(CreateView):
model = DepositAddress
template_name = "create_user_address.html"
fields = ["address"]
success_url = reverse_lazy('base')
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(CreateUserAddressView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
class DeleteUserAddressView(DeleteView):
model = DepositAddress
success_url = reverse_lazy('base')
class UpdateUserAddressView(UpdateView):
model = DepositAddress
template_name = "update_user_address.html"
fields = ["address"]
success_url = reverse_lazy('base')
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super(UpdateUserAddressView, self).get_context_data(**kwargs)
ctx['balance_ticker'] = BalanceTicker.objects.all()[BalanceTicker.objects.count()-1]
ctx['active_order_ticker'] = ActiveOrderTicker.objects.all()[ActiveOrderTicker.objects.count()-1]
ctx['ticker'] = Ticker.objects.all()[Ticker.objects.count()-1]
ctx['depth'] = Depth.objects.all()[Depth.objects.count()-1]
return ctx
|
gpl-3.0
|
cybernet14/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
206
|
7643
|
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
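# The helper above returns an array of shape (len(k_range), n_runs) holding
# score_func evaluated on pairs of independent uniform random labelings; the
# test below uses it to check that adjusted scores stay close to zero by chance.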
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
|
bsd-3-clause
|
kcompher/FreeDiscovUI
|
freediscovery/ingestion.py
|
1
|
10510
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import os
import pandas as pd
from .exceptions import (DatasetNotFound, InitException, NotFound, WrongParameter)
def _list_filenames(data_dir, dir_pattern=None, file_pattern=None):
""" List all files in a data_dir"""
import re
# parse all files in the folder
filenames = []
for root, subdirs, files in os.walk(data_dir):
if dir_pattern is None or re.match(dir_pattern, root):
for fname in files:
if file_pattern is None or re.match(file_pattern, fname):
filenames.append(os.path.normpath(os.path.join(root, fname)))
# make sure that sorting order is deterministic
return sorted(filenames)
class DocumentIndex(object):
def __init__(self, data_dir, data, filenames):
self.data_dir = data_dir
self.data = data
self.filenames = filenames
def _check_index(self, keys=None):
""" Given a list of keys check which keys will be used for indexing
and whether these keys could be used for an index
Parameters
----------
keys : list
one or multiple choices among "internal_id", "document_id", "rendition_id", "file_path".
default=["internal_id"]
Returns
-------
index_cols : list
a subset of keys that would be used for an index
"""
if keys is None:
keys = ['internal_id']
if "internal_id" in keys:
index_cols = ['internal_id',]
elif "document_id" in keys and \
"document_id" in self.data.columns and \
"rendition_id" in keys and \
"rendition_id" in self.data.columns:
index_cols = ['document_id', 'rendition_id']
elif "document_id" in keys and \
"document_id" in self.data.columns:
if self.data.document_id.is_unique:
index_cols = ['document_id',]
else:
raise ValueError('document_id cannot be used as an index, since it has duplicates'
' (and rendition_id has duplicates)')
elif "file_path" in keys and \
"file_path" in self.data.columns:
index_cols = ['file_path']
else:
raise ValueError('The query columns {} cannot be used as an index'.format(list(keys)))
if len(index_cols) == 1:
index_cols = index_cols[0]
# make sure we can use the selected columns as an index
self.data.set_index(index_cols, verify_integrity=True)
return index_cols
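# Illustrative behaviour of _check_index above (hypothetical data): with
# keys=['document_id'] and a unique 'document_id' column in self.data it
# returns 'document_id'; if that column has duplicates it raises ValueError.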
def search(self, query, strict=True, drop=True):
"""Search the filenames given by some user query
Parameters
----------
query : pandas.DataFrame
a DataFrame with one of the following fields "internal_id",
("document_id", "rendition_id"), "document_id", "file_path"
strict : bool
raise an error if some documents are not found
drop : bool
drop columns not in the dataset
Returns
-------
df : pd.DataFrame
the response dataframe with fields
"internal_id", "file_path" and optionally "document_id" and "rendition_id"
"""
if not isinstance(query, pd.DataFrame):
raise ValueError('The query must be a pandas DataFrame, got {}'.format(type(query)))
if not query.shape[0]:
raise ValueError('Query has zero element!')
index_cols = self._check_index(query.columns)
query['sort_order'] = query.index.values
res = self.data.merge(query, on=index_cols, how='inner', suffixes=('', '_query'))
# make sure we preserve the original order in the query
res.sort_values(by='sort_order', inplace=True)
del res['sort_order']
if res.shape[0] != query.shape[0]:
# some documents were not found
msg = ['Query elements not found:']
for index, row in query.iterrows():
if row[index_cols] not in self.data[index_cols].values:
msg.append(' * {}'.format(row.to_dict()))
if strict:
raise NotFound('\n'.join(msg))
else:
print('Warning: '+ '\n'.join(msg))
if drop:
# ignore all additional columns
res = res[self.data.columns]
return res
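# Usage sketch (hypothetical file names):
#     res = index.search(pd.DataFrame({'file_path': ['a.txt', 'b.txt']}))
# returns the matching rows of self.data in query order and, with strict=True,
# raises NotFound listing any documents that could not be matched.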
def _search_filenames(self, filenames):
""" A helper function that reproduces the previous behaviour in FeaturesVectorizer"""
query = pd.DataFrame(filenames, columns=['file_path'])
res = self.search(query)
return res.internal_id.values
def render_dict(self, res=None, return_file_path=False):
"""Render a pandas dataframe as a list of dicts
Parameters
----------
res : {pandas.DataFrame, None}
some dataset with additional data that must contain the 'internal_id' key
return_file_path : bool
return the file paths, default: False
Returns
-------
out : dict
"""
if res is not None:
res = res.set_index('internal_id', drop=False)
db = self.data.set_index('internal_id', drop=False)
if not return_file_path:
base_keys = [key for key in self.data.columns if key != 'file_path']
else:
base_keys = list(self.data.columns)
if res is not None:
res_keys = [key for key in res if key not in base_keys]
if not return_file_path and 'file_path' in res_keys:
res_keys.remove('file_path')
db = db[base_keys]
out = []
if res is not None:
for index, row in res[res_keys].iterrows():
row_dict = row.to_dict()
db_sel = db.loc[index]
row_dict.update(db_sel.to_dict())
out.append(row_dict)
else:
for index, row in db.iterrows():
row_dict = row.to_dict()
out.append(row_dict)
return out
def render_list(self, res=None, return_file_path=False):
"""Render a pandas dataframe as a dict of lists
Parameters
----------
res : {pandas.DataFrame, None}
some dataset with additional data that must contain the 'internal_id' key
return_file_path : bool
return the file paths, default: False
Returns
-------
out : dict
"""
if res is not None:
res = res.set_index('internal_id', drop=False)
db = self.data.set_index('internal_id', drop=False)
if not return_file_path:
base_keys = [key for key in self.data.columns if key != 'file_path']
else:
base_keys = list(self.data.columns)
db = db[base_keys]
if res is not None:
res_keys = [key for key in res if key not in base_keys]
if not return_file_path:
if 'file_path' in res_keys:
res_keys.remove('file_path')
else:
res_keys = []
out = {}
for key in base_keys + res_keys:
out[key] = []
if res is not None:
for index, row in res[res_keys].iterrows():
db_sel_dict = db.loc[index].to_dict()
for key, val in db_sel_dict.items():
out[key].append(val)
for key, val in row.to_dict().items():
out[key].append(val)
else:
for index, row in db.iterrows():
row_dict = row.to_dict()
for key, val in row_dict.items():
out[key].append(val)
return out
@classmethod
def from_list(cls, metadata):
""" Create a DocumentIndex from a list of dictionaries, for instance
{
document_id: 1,
rendition_id: 4,
file_path: "c:\dev\1.txt"
}
Parameters
----------
metadata : list of dicts
a list of dictionaries with keys ['file_path', 'document_id', 'rendition_id']
describing the data ingestion (this overwrites data_dir)
Returns
-------
result : DocumentIndex
a DocumentIndex object
"""
metadata = sorted(metadata, key=lambda x: x['file_path'])
filenames = [el['file_path'] for el in metadata]
data_dir = cls._detect_data_dir(filenames)
if not filenames: # no files were found
raise WrongParameter('No files to process were found!')
filenames_rel = [os.path.relpath(el, data_dir) for el in filenames]
# modify the metadata list inplace
for idx, (db_el, file_path) in enumerate(zip(metadata, filenames_rel)):
db_el['file_path'] = file_path
db_el['internal_id'] = idx
db = pd.DataFrame(metadata)
return cls(data_dir, db, filenames)
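# Usage sketch (hypothetical paths):
#     DocumentIndex.from_list([{'file_path': '/data/a.txt', 'document_id': 1},
#                              {'file_path': '/data/b.txt', 'document_id': 2}])
# assigns 'internal_id' in sorted file_path order and stores paths relative to
# the common data_dir detected from the input.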
@staticmethod
def _detect_data_dir(filenames):
data_dir = os.path.commonprefix(filenames)
data_dir = os.path.normpath(data_dir)
if os.path.exists(data_dir):
return data_dir
elif os.path.exists(os.path.dirname(data_dir)):
return os.path.dirname(data_dir)
else:
raise IOError('data_dir={} does not exist!'.format(data_dir))
@classmethod
def from_folder(cls, data_dir, file_pattern=None, dir_pattern=None):
""" Create a DocumentIndex from files in data_dir
Parameters
----------
data_dir : str
path to the data directory (used only if metadata not provided), default: None
Returns
-------
result : DocumentIndex
a DocumentIndex object
"""
data_dir = os.path.normpath(data_dir)
if not os.path.exists(data_dir):
raise NotFound('data_dir={} does not exist'.format(data_dir))
filenames = _list_filenames(data_dir, dir_pattern, file_pattern)
filenames_rel = [os.path.relpath(el, data_dir) for el in filenames]
db = [{'file_path': file_path, 'internal_id': idx} \
for idx, file_path in enumerate(filenames_rel)]
db = pd.DataFrame(db)
return cls(data_dir, db, filenames)
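# Minimal usage sketch (the path below is hypothetical; this only runs when the
# module is executed directly and assumes the directory actually exists):
if __name__ == '__main__':
    _demo_index = DocumentIndex.from_folder('/tmp/example_docs')
    print(_demo_index.render_dict()[:3])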
|
bsd-3-clause
|
jakobworldpeace/scikit-learn
|
benchmarks/bench_glmnet.py
|
111
|
3890
|
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
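# Example (as used in the __main__ block below): bench(ScikitLasso, X, Y, X_test,
# Y_test, coef_) prints the fit duration, the RMSE on the held-out set and the
# mean absolute coefficient error, and returns the duration in seconds.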
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features // 10  # integer division: make_regression expects an int
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
rusucosmin/courses
|
ubb/ai/lab1.py
|
1
|
2284
|
'''
Aims:
To perform a random search for a given problem, to compute a quality measure for the
solution candidates, to make some statistical analysis to a sample set of possible solutions.
Task:
Develop an application in python that has the following functionalities:
1. Randomly creates possible solutions for your assigned problem (Random Candidate
Solution Generator) (25p).
2. Check if a candidate to the solution is indeed a viable solution (25p).
3. Assigns a measure of quality (a positive value) to a candidate solution - where zero
marks a correct one and, as the candidate is more and more farther from the correct
solution the number grows (Fitness Function) (25p).
4. For a sample set (of size n) of random generated solutions, the mean and the standard
deviation of their quality measures is computed (25p)
11. Caravan
A caravan of n camels travels through the desert in a single line. To break the monotony
of the long traveling days, every other day the camels are rearranged so that no camel
sees the same camel in front of it as before. Generate possible arrangements of the
camels, knowing how they were placed on the first day.
Solution:
    - initially, suppose the camels were arranged 0, 1, 2, ..., n - 1 (for simplicity)
    - a candidate solution is any permutation of the camels
      (no extra validity check is needed for a candidate)
    - a possible fitness function: the number of camels that still see the same
      camel in front of them as on the first day (so 0 marks a correct arrangement)
'''
import numpy
import random
import matplotlib.pyplot as pyplot
def generate_camels(n):
x = list(range(n))  # list() needed: range objects cannot be shuffled in place
random.shuffle(x)
return x
def fitness_function(p):
cnt = len(p) - 1
for i in range(len(p) - 1):
if p[i] + 1 != p[i + 1]:
cnt -= 1
return cnt
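# Illustrative sanity checks (example permutations of our own, not from the
# original exercise): the fitness counts camels that still see the same camel
# in front as on day one, so 0 marks a valid new arrangement.
assert fitness_function([2, 3, 0, 1]) == 2
assert fitness_function([1, 0, 3, 2]) == 0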
def generate_samples(n, m):
samples = [generate_camels(n) for _ in range(m)]
fitness = [fitness_function(x) for x in samples]
print("\n".join([str(samples[i]) + " fitness = " + str(fitness[i]) for i in range(len(samples))]))
pyplot.hist(fitness)
pyplot.draw()
print("mean: %.6f" % numpy.mean(fitness))
print("std dev: %.6f" % numpy.std(fitness))
pyplot.show()
def main():
n = int(input("number of camles: = "))
m = int(input("sample size: = "))
generate_samples(n, m)
main()
|
mit
|
Jimmy-Morzaria/scikit-learn
|
sklearn/linear_model/tests/test_randomized_l1.py
|
214
|
4690
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# compute the univariate F-scores, used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
|
bsd-3-clause
|
btabibian/scikit-learn
|
sklearn/tests/test_isotonic.py
|
24
|
14350
|
import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_small_number_of_samples():
x = [0, 1, 2]
y = [1, 1.1, 1.05]
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
y = np.array([10, 0, 2])
y_ = np.array([4, 4, 4])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
|
bsd-3-clause
|
JPMoresmau/aifh
|
vol3/vol3-python-examples/examples/example_som_colors.py
|
2
|
1195
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import time
import numpy as np
import matplotlib.pyplot as plt
plt.axis([0, 1000, 0, 1])
plt.ion()
plt.show()
for i in range(1000):
y = np.random.random()
plt.scatter(i, y)
plt.draw()
time.sleep(0.05)
|
apache-2.0
|
arahuja/scikit-learn
|
sklearn/linear_model/ridge.py
|
5
|
38517
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
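# A minimal sketch (hypothetical helper, not used elsewhere in this module):
# the two branches above rely on the identity
#     inv(X.T X + alpha*Id) X.T y == X.T inv(X X.T + alpha*Id) y,
# which can be checked on a tiny dense problem.
def _demo_primal_dual_identity(alpha=1.0):
    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    y = rng.randn(5)
    # primal form: w = inv(X^t X + alpha*Id) X^t y
    primal = np.linalg.solve(X.T.dot(X) + alpha * np.eye(3), X.T.dot(y))
    # dual (kernel) form: w = X^t inv(X X^t + alpha*Id) y
    dual = X.T.dot(np.linalg.solve(X.dot(X.T) + alpha * np.eye(5), y))
    return np.allclose(primal, dual)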
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
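# A minimal sketch (hypothetical helper, illustrative values only): lsqr with
# damp=sqrt(alpha) minimizes ||X w - y||^2 + alpha * ||w||^2, so on a small
# dense problem it should agree with the closed-form ridge solution.
def _demo_lsqr_damp(alpha=2.0):
    rng = np.random.RandomState(1)
    X = rng.randn(20, 4)
    y = rng.randn(20)
    closed_form = np.linalg.solve(X.T.dot(X) + alpha * np.eye(4), X.T.dot(y))
    via_lsqr = sp_linalg.lsqr(X, y, damp=np.sqrt(alpha),
                              atol=1e-12, btol=1e-12)[0]
    return np.allclose(closed_form, via_lsqr, atol=1e-6)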
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
        # Only one penalty, so we can solve all targets at once.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
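# A minimal sketch (hypothetical helper): the SVD route used above,
# coef = V diag(s / (s**2 + alpha)) U.T y, reproduces the closed-form ridge
# solution on a small dense problem.
def _demo_svd_route(alpha=0.5):
    rng = np.random.RandomState(2)
    X = rng.randn(6, 4)
    y = rng.randn(6)
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    via_svd = np.dot(Vt.T, (s / (s ** 2 + alpha)) * np.dot(U.T, y))
    direct = np.linalg.solve(X.T.dot(X) + alpha * np.eye(4), X.T.dot(y))
    return np.allclose(via_svd, direct)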
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
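# A minimal sketch (hypothetical helper, illustrative data only): scaling the
# rows of X and y by sqrt(sample_weight), as _rescale_data does, turns the
# weighted ridge problem into an unweighted one with the same solution.
def _demo_sqrt_weight_rescaling(alpha=1.0):
    rng = np.random.RandomState(3)
    X = rng.randn(15, 3)
    y = rng.randn(15)
    sw = rng.uniform(0.5, 2.0, size=15)
    # weighted normal equations: (X^t diag(sw) X + alpha*Id) w = X^t diag(sw) y
    weighted = np.linalg.solve(X.T.dot(sw[:, np.newaxis] * X)
                               + alpha * np.eye(3), X.T.dot(sw * y))
    # unweighted problem on the rescaled data
    Xr = np.sqrt(sw)[:, np.newaxis] * X
    yr = np.sqrt(sw) * y
    unweighted = np.linalg.solve(Xr.T.dot(Xr) + alpha * np.eye(3),
                                 Xr.T.dot(yr))
    return np.allclose(weighted, unweighted)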
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
        # cholesky if X is a dense array or sample weights are given,
        # and sparse_cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
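# A minimal usage sketch (hypothetical helper; data, alpha and solver choice
# are illustrative only) for the public ridge_regression function above.
def _demo_ridge_regression_usage():
    rng = np.random.RandomState(4)
    X = rng.randn(30, 4)
    w_true = np.array([1., -2., 0.5, 0.])
    y = X.dot(w_true) + 0.01 * rng.randn(30)
    coef = ridge_regression(X, y, alpha=1e-3, solver='cholesky')
    # with little noise and a small penalty the coefficients stay close to
    # the generating weights
    return coef.shape == (4,) and np.allclose(coef, w_true, atol=0.1)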
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
        a dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
# get the class weight corresponding to each sample
sample_weight = compute_sample_weight(self.class_weight, y)
else:
sample_weight = None
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
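# A minimal usage sketch (hypothetical helper; the toy data are illustrative
# only): RidgeClassifier on a linearly separable two-class problem.
def _demo_ridge_classifier_usage():
    X = np.array([[-2., -1.], [-1., -1.], [-1., -2.],
                  [1., 1.], [1., 2.], [2., 1.]])
    y = np.array([0, 0, 0, 1, 1, 1])
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    # points deep inside each class region should get the matching label
    return list(clf.predict([[-1.5, -1.], [2., 2.]])) == [0, 1]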
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
            # The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
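# A minimal sketch (hypothetical helper; small dense data, no intercept) of
# the identity stated in the _RidgeGCV notes above: with K = X X^T,
# G = inv(K + alpha*Id) and c = G y, the leave-one-out residuals equal
# c / diag(G), checked here against brute-force refitting.
def _demo_loo_identity(alpha=1.0):
    rng = np.random.RandomState(5)
    X = rng.randn(8, 3)
    y = rng.randn(8)
    G = np.linalg.inv(X.dot(X.T) + alpha * np.eye(8))
    looe = G.dot(y) / np.diag(G)
    brute = np.empty(8)
    for i in range(8):
        # refit ridge with sample i left out and record its residual
        mask = np.arange(8) != i
        w = np.linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(3),
                            X[mask].T.dot(y[mask]))
        brute[i] = y[i] - X[i].dot(w)
    return np.allclose(looe, brute)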
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::
        'auto' : use svd if n_samples > n_features and X is not a sparse
            matrix, otherwise use eigen
        'svd' : force computation via singular value decomposition of X
            (does not work for sparse matrices)
        'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
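# A minimal usage sketch (hypothetical helper; data and alpha grid are
# illustrative only): RidgeCV selects one alpha from the grid by efficient
# leave-one-out cross-validation.
def _demo_ridge_cv_usage():
    rng = np.random.RandomState(6)
    X = rng.randn(40, 5)
    y = X.dot(rng.randn(5)) + 0.1 * rng.randn(40)
    reg = RidgeCV(alphas=(0.01, 0.1, 1.0, 10.0)).fit(X, y)
    return reg.alpha_ in (0.01, 0.1, 1.0, 10.0) and reg.coef_.shape == (5,)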
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one.
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
if sample_weight is None:
sample_weight = 1.
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
|
bsd-3-clause
|
DSLituiev/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
14
|
44270
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
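    # A minimal sketch (hypothetical helper, not collected as a test;
    # illustrative data only): the averaging performed by asgd above is just
    # the running mean of the plain SGD weight iterates, which can be checked
    # by replaying the same updates and averaging explicitly.
    def _demo_asgd_running_mean(self, eta=0.01, alpha=0.0):
        rng = np.random.RandomState(0)
        X_demo = rng.normal(size=(10, 3))
        y_demo = X_demo.dot(rng.normal(size=3))
        decay = 1.0
        if (isinstance(self, SparseSGDClassifierTestCase) or
                isinstance(self, SparseSGDRegressorTestCase)):
            decay = .01
        weights = np.zeros(3)
        intercept = 0.0
        seen = []
        for i, entry in enumerate(X_demo):
            # same squared-loss update as asgd, recording each iterate
            gradient = np.dot(entry, weights) + intercept - y_demo[i]
            weights = weights * (1.0 - eta * alpha) - eta * gradient * entry
            intercept += -(eta * gradient) * decay
            seen.append(weights.copy())
        average_weights, _ = self.asgd(X_demo, y_demo, eta, alpha)
        return np.allclose(average_weights, np.mean(seen, axis=0))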
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground_truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
bsd-3-clause
|
rhyolight/nupic.research
|
projects/sdr_paper/poirazi_neuron_model/run_HTM_classification_experiment.py
|
7
|
7065
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random
import numpy
from htmresearch.frameworks.poirazi_neuron_model.neuron_model import (
power_nonlinearity, threshold_nonlinearity)
from htmresearch.frameworks.poirazi_neuron_model.neuron_model import Matrix_Neuron as Neuron
from htmresearch.frameworks.poirazi_neuron_model.data_tools import (
generate_data, generate_evenly_distributed_data_sparse, split_sparse_matrix)
from sklearn.cluster import KMeans
from multiprocessing import Pool, cpu_count
from nupic.bindings.math import *
from collections import Counter
def run_initialization_experiment(seed,
num_neurons = 50,
dim = 40,
num_bins = 10,
num_samples = 50*600,
neuron_size = 10000,
num_dendrites = 400,
dendrite_length = 25,
power = 10,
):
"""
  Runs an experiment testing classification of a binary dataset, based on
  Poirazi & Mel's original experiment. Learning uses our modified variant of
  their rule, and positive and negative neurons compete to classify a datapoint.
Performance has historically been poor, noticeably worse than what is
achieved with only a single neuron using an HTM-style learning rule on
datasets of similar size. It is suspected that the simplifications made
to the P&M learning rule are having a negative effect.
Furthermore, P&M report that they are willing to train for an exceptional
amount of time, up to 96,000 iterations per neuron. We have never even
begun to approach this long a training time, so it is possible that our
performance would converge with theirs given more time.
This experiment does not correspond to a figure in the paper, but we report
our results across an average of 50 trials, using the settings above.
"""
numpy.random.seed(seed)
nonlinearity = power_nonlinearity(power)
pos_neurons = [Neuron(size = neuron_size, num_dendrites = num_dendrites, dendrite_length = dendrite_length, nonlinearity = nonlinearity, dim = dim*num_bins) for i in range(num_neurons/2)]
neg_neurons = [Neuron(size = neuron_size, num_dendrites = num_dendrites, dendrite_length = dendrite_length, nonlinearity = nonlinearity, dim = dim*num_bins) for i in range(num_neurons/2)]
#pos, neg = generate_evenly_distributed_data_sparse(dim = 400, num_active = 40, num_samples = num_samples/2), generate_evenly_distributed_data_sparse(dim = 400, num_active = 40, num_samples = num_samples/2)
pos, neg = generate_data(dim = dim, num_bins = num_bins, num_samples = num_samples, sparse = True)
if (pos.nRows() > num_dendrites*len(pos_neurons)):
print "Too much data to have unique dendrites for positive neurons, clustering"
pos = pos.toDense()
model = KMeans(n_clusters = len(pos_neurons), n_jobs=1)
clusters = model.fit_predict(pos)
neuron_data = [SM32() for i in range(len(pos_neurons))]
for datapoint, cluster in zip(pos, clusters):
neuron_data[cluster].append(SM32([datapoint]))
for i, neuron in enumerate(pos_neurons):
neuron.HTM_style_initialize_on_data(neuron_data[i], [1 for i in range(neuron_data[i].nRows())])
pos = SM32(pos)
else:
print "Directly initializing positive neurons with unique dendrites"
neuron_data = split_sparse_matrix(pos, len(pos_neurons))
for neuron, data in zip(pos_neurons, neuron_data):
neuron.HTM_style_initialize_on_data(data, [1 for i in range(data.nRows())])
if (neg.nRows() > num_dendrites*len(neg_neurons)):
print "Too much data to have unique dendrites for negative neurons, clustering"
neg = neg.toDense()
model = KMeans(n_clusters = len(neg_neurons), n_jobs=1)
clusters = model.fit_predict(neg)
neuron_data = [SM32() for i in range(len(neg_neurons))]
for datapoint, cluster in zip(neg, clusters):
neuron_data[cluster].append(SM32([datapoint]))
for i, neuron in enumerate(neg_neurons):
neuron.HTM_style_initialize_on_data(neuron_data[i], [1 for i in range(neuron_data[i].nRows())])
neg = SM32(neg)
else:
print "Directly initializing negative neurons with unique dendrites"
neuron_data = split_sparse_matrix(neg, len(neg_neurons))
for neuron, data in zip(neg_neurons, neuron_data):
neuron.HTM_style_initialize_on_data(data, [1 for i in range(data.nRows())])
print "Calculating error"
labels = [1 for i in range(pos.nRows())] + [-1 for i in range(neg.nRows())]
data = pos
data.append(neg)
error, fp, fn = get_error(data, labels, pos_neurons, neg_neurons)
print "Error at initialization is {}, with {} false positives and {} false negatives".format(error, fp, fn)
return error
def get_error(data, labels, pos_neurons, neg_neurons = [], add_noise = True):
"""
Calculates error, including number of false positives and false negatives.
Written to allow the use of multiple neurons, in case we attempt to use a
population in the future.
"""
num_correct = 0
num_false_positives = 0
num_false_negatives = 0
classifications = numpy.zeros(data.nRows())
for neuron in pos_neurons:
classifications += neuron.calculate_on_entire_dataset(data)
for neuron in neg_neurons:
classifications -= neuron.calculate_on_entire_dataset(data)
if add_noise:
classifications += (numpy.random.rand() - 0.5)/1000
classifications = numpy.sign(classifications)
for classification, label in zip(classifications, labels):
if classification > 0 and label > 0:
num_correct += 1.0
elif classification <= 0 and label <= 0:
num_correct += 1.0
elif classification > 0 and label <= 0:
num_false_positives += 1
else:
num_false_negatives += 1
return (1.*num_false_positives + num_false_negatives)/data.nRows(), num_false_positives, num_false_negatives
if __name__ == "__main__":
p = Pool(cpu_count())
errors = p.map(run_initialization_experiment, [100+i for i in range(50)])
print numpy.mean(errors)
|
gpl-3.0
|
ilyes14/scikit-learn
|
examples/tree/plot_tree_regression.py
|
206
|
1476
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
studywolf/pydmps
|
examples/draw_number_2_temporal_scaling.py
|
1
|
1727
|
"""
Copyright (C) 2016 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import matplotlib.pyplot as plt
import pydmps
import pydmps.dmp_discrete
y_des = np.load("2.npz")["arr_0"].T
y_des -= y_des[:, 0][:, None]
# test normal run
dmp = pydmps.dmp_discrete.DMPs_discrete(n_dmps=2, n_bfs=500, ay=np.ones(2) * 10.0)
y_track = []
dy_track = []
ddy_track = []
dmp.imitate_path(y_des=y_des)
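# Roll out the same learned DMP at three temporal scalings: tau=1 reproduces
# the demonstrated timing, while (judging by the variable names below) tau < 1
# stretches the rollout in time and tau > 1 compresses it.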
y_track_normal, _, _ = dmp.rollout(tau=1)
y_track_slow, _, _ = dmp.rollout(tau=0.1)
y_track_fast, _, _ = dmp.rollout(tau=4)
plt.figure(1, figsize=(6, 6))
plt.plot(y_track_normal[:, 0], y_track_normal[:, 1], "b", lw=2)
plt.plot(y_track_slow[:, 0], y_track_slow[:, 1], "--r", lw=2)
plt.plot(y_track_fast[:, 0], y_track_fast[:, 1], "--y", lw=2)
plt.legend(['Normal', 'Slow', 'Fast'])
plt.title("DMP system - draw number 2")
plt.axis("equal")
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.figure(2)
plt.subplot(3, 1, 1)
plt.title("DMP system - draw number 2")
plt.plot(y_track_normal)
plt.ylabel('Normal')
plt.subplot(3, 1, 2)
plt.plot(y_track_slow)
plt.ylabel('Slow')
plt.subplot(3, 1, 3)
plt.plot(y_track_fast)
plt.ylabel('Fast')
plt.show()
|
gpl-3.0
|
IFDYS/FDTD3D_MPI_SRC
|
view_result.py
|
1
|
3626
|
#!/usr/bin/python
from numpy import *
from matplotlib.pyplot import *
import matplotlib.pylab as pylab
import os
import time
import re
def read_slice(fname):
with open(fname) as fslice:
slice_nx,slice_ny,slice_nz = fslice.readline().split()
slice_x = fslice.readline().split()
slice_y = fslice.readline().split()
slice_z = fslice.readline().split()
slice_nx = int(slice_nx);slice_ny = int(slice_ny);slice_nz = int(slice_nz)
return slice_nx,slice_ny,slice_nz
def read_rec(frec):
global nrec
with open(frec) as fp:
        nrec = int(fp.readline().strip('\n'))
def read_par():
global nx,ny,nz,slice_nx,slice_ny,slice_nz,nt
with open('par.in') as fpar:
fpar.readline()
dx,dy,dz,dt = fpar.readline().split()
print 'dx dy dz dt: ',dx,dy,dz,dt
fpar.readline()
nx,ny,nz,nt = fpar.readline().split()
nx = int(nx);ny = int(ny);nz = int(nz);nt=int(nt)
print 'nx ny nz nt: ',nx,ny,nz,nt
fpar.readline()
nt_src = fpar.readline()
print 'nt of src: ',nt_src
fpar.readline()
step_t_wavefield,step_x_wavefield = fpar.readline().split()
        print 'output time step and space step of wavefield: ',step_t_wavefield,step_x_wavefield
fpar.readline()
step_slice = fpar.readline()
print 'output step of slice: ',step_slice
fpar.readline()
npml_x,npml_y,npml_z= fpar.readline().split()
print 'npml x y z: ',npml_x,npml_y,npml_z
fpar.readline()
fpar.readline() #pml m kapxmax kapymax kapzmax alpha
fpar.readline()
fsrc= fpar.readline().strip('\n')
print 'src.in: ',fsrc
fpar.readline()
frec= fpar.readline().strip('\n')
print 'rec.in: ',frec
fpar.readline()
feps = fpar.readline().strip('\n')
fpar.readline()
fmu = fpar.readline().strip('\n')
fpar.readline()
fsig= fpar.readline().strip('\n')
fpar.readline()
fslice= fpar.readline().strip('\n')
slice_nx,slice_ny,slice_nz = read_slice(fslice)
def view_slice():
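    # For every saved x/y/z slice file, reshape the flat data into its 3D
    # block, plot the first slab of each orientation with a colorbar, and save
    # it as a .jpg named after the data file.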
xlist = os.popen('ls *xSlice*dat').readlines()
ylist = os.popen('ls *ySlice*dat').readlines()
zlist = os.popen('ls *zSlice*dat').readlines()
i = 0
for xname in xlist:
print xname
yname = ylist[i]
zname = zlist[i]
i += 1
xdata = loadtxt(xname.strip('\n'))
ydata = loadtxt(yname.strip('\n'))
zdata = loadtxt(zname.strip('\n'))
xslice = reshape(xdata,(slice_nx,ny,nz))
yslice = reshape(ydata,(slice_ny,nx,nz))
zslice = reshape(zdata,(slice_nz,nx,ny))
# data = reshape(data,(126,101))
clf()
imshow(xslice[0])
colorbar()
savefig(re.findall("^\w+",xname)[0]+".jpg")
clf()
imshow(yslice[0])
colorbar()
savefig(re.findall("^\w+",yname)[0]+".jpg")
clf()
imshow(zslice[0])
colorbar()
savefig(re.findall("^\w+",zname)[0]+".jpg")
# show()
# show(block=False)
# time.sleep(0.5)
# close()
def view_gather():
global nrec,nt
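    # Plot every non-empty gather as a trace normalized to unit peak amplitude
    # and offset vertically by its index, then save the composite figure.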
ilist = os.popen('ls gather*dat').readlines()
i = 0
for name in ilist:
gather = loadtxt(name.strip('\n'))
if gather.max() == 0 and gather.min() == 0:
continue
# for i in range(len(gather)):
plot(gather[:]/max(abs(gather[:]))+i)
i += 1
savefig('gather.png')
read_par()
view_gather()
os.chdir("./Output/")
view_slice()
#view_wavefield()
|
gpl-2.0
|
Microsoft/hummingbird
|
tests/test_sklearn_normalizer_converter.py
|
1
|
2045
|
"""
Tests sklearn Normalizer converter
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.preprocessing import Normalizer
import hummingbird.ml
class TestSklearnNormalizer(unittest.TestCase):
def test_normalizer_converter(self):
# Generate a random 2D array with values in [0, 1000)
np.random.seed(0)
data = np.random.rand(100, 200) * 1000
data = np.array(data, dtype=np.float32)
data_tensor = torch.from_numpy(data)
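        # For each supported norm, fit the sklearn Normalizer, convert it to a
        # torch-backed model with hummingbird, and check that both produce the
        # same transform output within tolerance.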
for norm in ["l1", "l2", "max"]:
model = Normalizer(norm=norm)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(torch_model)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data_tensor), rtol=1e-06, atol=1e-06,
)
def test_normalizer_converter_raises_wrong_type(self):
# Generate a random 2D array with values in [0, 1000)
np.random.seed(0)
data = np.random.rand(100, 200) * 1000
data = np.array(data, dtype=np.float32)
model = Normalizer(norm="invalid")
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertRaises(RuntimeError, torch_model.model._operator_map.SklearnNormalizer, torch.from_numpy(data))
# Float 64 data tests
def test_float64_normalizer_converter(self):
# Generate a random 2D array with values in [0, 1000)
np.random.seed(0)
data = np.random.rand(100, 200) * 1000
data_tensor = torch.from_numpy(data)
for norm in ["l1", "l2", "max"]:
model = Normalizer(norm=norm)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertIsNotNone(torch_model)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data_tensor), rtol=1e-06, atol=1e-06,
)
if __name__ == "__main__":
unittest.main()
|
mit
|
kbase/narrative
|
src/biokbase/narrative/viewers.py
|
2
|
5271
|
import clustergrammer_widget
from clustergrammer_widget.clustergrammer import Network
import pandas as pd
import biokbase.narrative.clients as clients
from biokbase.narrative.app_util import system_variable
def view_as_clustergrammer(
ws_ref, col_categories=(), row_categories=(), normalize_on=None
):
"""
This function returns an interactive clustergrammer widget for a specified object. Data type
must contain a 'data' key with a FloatMatrix2D type value
:param ws_ref: Object workspace reference
:param col_categories: iterable with the permitted factors from the col_attributemapping.
Defaults to all factors, pass None to exclude.
:param row_categories: iterable with the permitted categories from the row_attributemapping.
Defaults to all factors, pass None to exclude.
:param normalize_on: If provided, the matrix will be converted to z-scores normalized on the
'row' or 'column' axis
:return:
"""
assert isinstance(col_categories, (tuple, set, list))
assert isinstance(row_categories, (tuple, set, list))
assert normalize_on in {None, "row", "column"}
generic_df = get_df(ws_ref, col_categories, row_categories, True)
net = Network(clustergrammer_widget)
net.df_to_dat({"mat": generic_df})
if normalize_on:
net.normalize(axis=normalize_on)
net.cluster(enrichrgram=False)
return net.widget()
def get_df(ws_ref, col_attributes=(), row_attributes=(), clustergrammer=False):
"""
Gets a dataframe from the WS object
:param ws_ref: The Workspace reference of the 2DMatrix containing object
:param col_attributes: Which column attributes should appear in the resulting DataFrame as a
multiIndex. Defaults to all attributes, pass None to use a simple index of only ID.
:param row_attributes: Which row attributes should appear in the resulting DataFrame as a
multiIndex. Defaults to all attributes, pass None to use a simple index of only ID.
:param clustergrammer: Returns a DataFrame with Clustergrammer compatible indices and columns.
Defaults to False.
:return: A Pandas DataFrame
"""
ws = clients.get("workspace")
if "/" not in ws_ref:
ws_ref = "{}/{}".format(system_variable("workspace"), ws_ref)
generic_data = ws.get_objects2({"objects": [{"ref": ws_ref}]})["data"][0]["data"]
if not _is_compatible_matrix(generic_data):
raise ValueError(
"{} is not a compatible data type for this viewer. Data type must "
"contain a 'data' key with a FloatMatrix2D type value".format(ws_ref)
)
cols = _get_categories(
generic_data["data"]["col_ids"],
ws_ref,
generic_data.get("col_attributemapping_ref"),
generic_data.get("col_mapping"),
col_attributes,
clustergrammer,
)
rows = _get_categories(
generic_data["data"]["row_ids"],
ws_ref,
generic_data.get("row_attributemapping_ref"),
generic_data.get("row_mapping"),
row_attributes,
clustergrammer,
)
return pd.DataFrame(data=generic_data["data"]["values"], columns=cols, index=rows)
def _is_compatible_matrix(obj):
try:
assert "data" in obj
assert "col_ids" in obj["data"]
assert "row_ids" in obj["data"]
assert "values" in obj["data"]
except AssertionError:
return False
return True
def _get_categories(
ids,
matrix_ref,
attributemapping_ref=None,
mapping=None,
whitelist=(),
clustergrammer=False,
):
"""Creates the correct kind of multi-factor index for clustergrammer display"""
if not attributemapping_ref or whitelist is None:
return ids
cat_list = []
ws = clients.get("workspace")
attribute_data = ws.get_objects2(
{"objects": [{"ref": matrix_ref + ";" + attributemapping_ref}]}
)["data"][0]["data"]
if not mapping:
mapping = {x: x for x in ids}
whitelist = set(whitelist)
for _id in ids:
try:
attribute_values = attribute_data["instances"][mapping[_id]]
except KeyError:
if _id not in mapping:
raise ValueError(
"Row or column id {} is not in the provided mapping".format(_id)
)
raise ValueError(
"AttributeMapping {} has no attribute {} which corresponds to row or "
"column id {} in the provided object.".format(
attributemapping_ref, mapping[_id], _id
)
)
cats = [_id]
for i, val in enumerate(attribute_values):
cat_name = attribute_data["attributes"][i]["attribute"]
if whitelist and cat_name not in whitelist:
continue
if clustergrammer:
cats.append("{}: {}".format(cat_name, val))
else:
cats.append(val)
cat_list.append(tuple(cats))
if clustergrammer:
return cat_list
attribute_names = [
x["attribute"]
for x in attribute_data["attributes"]
if not whitelist or x["attribute"] in whitelist
]
return pd.MultiIndex.from_tuples(cat_list, names=["ID"] + attribute_names)
|
mit
|
chryss/pygaarst-scripts
|
cw-research/ginaviz.py
|
1
|
9446
|
#!/usr/local/env python
# 2015-05-01 -- [email protected] (Chris Waigl)
# Fire detection: GINA download and sync script
from __future__ import print_function, unicode_literals, division
import os
import sys
import argparse
import datetime as dt
import glob
import seaborn as sns
from pygaarst import raster
from pygaarst import basemaputils as bu
from pygaarst.rasterhelpers import PygaarstRasterError
from matplotlib import pyplot as plt
import fiona
from shapely.geometry import Polygon
from shapely.geos import TopologicalError
from descartes import PolygonPatch
from pprint import pprint
sys.path.append(os.path.join(
os.path.expanduser('~'),
"Dropbox/Research/satelliteremotesensing/firedetection"))
import viirstools as vt
NASPATH = "/Volumes/cwdata1/VIIRS/GINA/dds.gina.alaska.edu/NPP/viirs"
VIZDIR = "visual"
TESTDIR = "testviz"
SCENELIST = "GINA_list.txt"
FILEPAT = 'SVM12_npp_*.h5'
VF = 'BorealAKForUAFSmoke.json'
VECTOROVERLAY = None # ugly
if VF:
VECTOROVERLAY = os.path.join(
os.path.split(
os.path.realpath(__file__))[0], VF)
MINFRAC = 0.01 # minimum fractional area for keeping a sub-scene
def printOpenFiles(openfiles):
print("### %d OPEN FILES:" % (len(openfiles), ))
pprint([f.x for f in openfiles])
def parse_arguments():
"""Parse arguments"""
parser = argparse.ArgumentParser(
description='Produce visualizations of extent of scenes')
parser.add_argument(
'-o', dest='overwrt',
help='overwrite existing images',
action='store_true')
parser.add_argument(
'--testdir',
help='use a single directory to test')
parser.add_argument(
'--ov', dest='overlayvector',
help="overlay vector file",
default=VECTOROVERLAY)
parser.add_argument(
'--dir', dest='archivedir',
help="directory containing file archive",
default=NASPATH)
parser.add_argument(
'--num', action='store_true',
help="print numbers on plot")
parser.add_argument(
'--debug', action='store_true',
help="debug mode")
return parser.parse_args()
def read_items(filepath):
"""Turns a plain text file file with one item/line into an iterator"""
with open(filepath, "rU") as src:
for line in src:
yield line.strip()
def generate_viz(
scene, outdir,fig, mm,
numbers=False,
debug=False,
datadir=None,
overwrite=False):
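    # In debug mode the builtin file/open are monkey-patched so that every file
    # opened while plotting is tracked in `openfiles`; printOpenFiles() then
    # reports handles that were never closed (useful for spotting leaked HDF5
    # files).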
if debug:
import __builtin__
openfiles = set()
oldfile = __builtin__.file
class newfile(oldfile):
def __init__(self, *args):
self.x = args[0]
print("### OPENING %s ###" % str(self.x) )
oldfile.__init__(self, *args)
openfiles.add(self)
def close(self):
print("### CLOSING %s ###" % str(self.x))
oldfile.close(self)
openfiles.remove(self)
oldopen = __builtin__.open
def newopen(*args):
return newfile(*args)
__builtin__.file = newfile
__builtin__.open = newopen
figname = os.path.split('{}_plot.png'.format(scene))[-1]
if os.path.exists(os.path.join(outdir, figname)):
if overwrite:
print("{} exists, overwriting.".format(figname))
else:
print("{} exists, skipping.".format(figname))
return
ax = fig.gca()
if not datadir:
datadir = os.path.join(NASPATH, scene, 'sdr')
elif os.path.exists(os.path.join(datadir, scene, 'sdr')):
datadir = os.path.join(datadir, scene, 'sdr')
else:
datadir = os.path.join(datadir, scene)
print("Working on data in {}.".format(datadir))
if not os.path.isdir(datadir):
print("{} is not a directory, skipping.".format(datadir))
return
os.chdir(datadir)
testfiles = glob.glob(FILEPAT)
print("Working with {} data files.".format(len(testfiles)))
testfiles.sort()
current_palette = sns.husl_palette(n_colors=len(testfiles), h=.2, l=.4, s=.9)
totalfrac = 0.0
plotobj = []
plottexts = []
for idx, tf in enumerate(testfiles):
print('{}: {}'.format(idx, tf))
try:
tsto = raster.VIIRSHDF5(tf)
except IOError as err:
print("Error opening file {}: {}.".format(tf, err))
print("Aborting plot for scene {}.".format(datadir))
return
try:
edgelons, edgelats = vt.getedge(tsto)
except PygaarstRasterError as err:
print("PygaarstRasterError, aborting: {}".format(err))
return
x, y = mm(edgelons, edgelats)
contour = Polygon(zip(x, y))
try:
intersect = contour.intersection(poly)
fraction = intersect.area/poly.area
except TopologicalError:
fraction = 0
totalfrac += fraction*100
print("Intersection as fraction of AOI: {}".format(fraction))
if fraction > MINFRAC:
size = 10
alpha = 1.000
color = current_palette[idx]
plotobj.append(ax.plot(x, y, zorder=15,
linewidth=3, color=color, alpha=alpha))
else:
size = 5
alpha = .4
color = current_palette[idx]
plotobj.append(ax.scatter(x, y, size, zorder=25,
marker='o', color=color, alpha=alpha))
if numbers:
midx = 0.5 * (min(x) + max(x))
midx = max(min(midx, mm.xmax), mm.xmin)
midy = 0.5 * (min(y) + max(y))
midy = max(min(midy, mm.ymax), mm.ymin)
plottexts.append(
ax.text(midx, midy, str(idx),
color=current_palette[idx],
weight='bold',
fontsize=20
))
tsto.close()
if debug:
printOpenFiles(openfiles)
ax.set_title("{}: {:.2f}% of AOI".format(scene, totalfrac))
print("Outdir variable: {}".format(outdir))
print("Figname variable: {}".format(figname))
imgpath = os.path.join(outdir, figname)
print("Saving figure to {}.".format(imgpath))
fig.savefig(imgpath)
# clean up base plot
for item in plotobj:
try:
for subobj in item:
try:
ax.lines.remove(subobj)
except ValueError:
pass
except TypeError: # not a sequence
try:
ax.collections.remove(item)
except ValueError:
pass
for item in plottexts:
ax.texts.remove(item)
def render_poly(mmap, fig, record, transform=True):
"""Given matplotlib axes, a map and a record, adds the record as a patch
and returns the axes so that reduce() can accumulate more
patches."""
if transform:
record = bu.maptransform(mmap, record)
fig.gca().add_patch(
PolygonPatch(record['geometry'],
fc='orange', ec='orange', alpha=0.5, zorder=11))
return fig
if __name__ == '__main__':
args = parse_arguments()
# generate base map
print("Generating base map")
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
mm = bu.map_interiorAK(width=5000000, height=5000000, resolution='l')
    # add polygon overlay
print("Overlaying polygon")
if args.overlayvector:
print("Overlaying vector data.")
with fiona.open(args.overlayvector, 'r') as source:
for record in source:
record = bu.maptransform(mm, record)
poly = Polygon(record['geometry']['coordinates'][0])
fig = render_poly(mm, fig, record, transform=False)
if args.testdir:
outdir = os.path.join(args.archivedir, TESTDIR)
if not os.path.exists(outdir):
os.makedirs(outdir)
slug = dt.datetime.utcnow().strftime('%Y%m%d%H%M%S')
if os.path.isdir(args.testdir):
if os.path.isdir(os.path.join(args.testdir, 'sdr')):
datadir = os.path.join(args.testdir, 'sdr')
else:
datadir = args.testdir
else:
if os.path.isdir(os.path.join(
args.archivedir, args.testdir, 'sdr')):
datadir = os.path.join(args.archivedir, args.testdir, 'sdr')
else:
datadir = os.path.join(args.archivedir, args.testdir)
generate_viz(
'test_{}'.format(slug),
outdir, fig, mm,
datadir=datadir,
numbers=args.num,
debug=args.debug)
else:
if os.path.exists(os.path.join(args.archivedir, SCENELIST)):
dirlist = read_items(os.path.join(args.archivedir, SCENELIST))
else:
dirlist = glob.glob(os.path.join(args.archivedir, "201[0-9]_*"))
dirlist = filter(os.path.isdir, dirlist)
dirlist.sort()
for scene in dirlist:
print()
outdir = os.path.join(args.archivedir, VIZDIR)
print("Scene variable: {}".format(scene))
if not os.path.exists(outdir):
os.makedirs(outdir)
print("Writing to {}.".format(outdir))
generate_viz(
scene, outdir, fig, mm,
datadir=args.archivedir,
overwrite=args.overwrt,
numbers=args.num,
debug=args.debug)
|
mit
|
kiyoto/statsmodels
|
statsmodels/sandbox/km_class.py
|
31
|
11748
|
#a class for the Kaplan-Meier estimator
from statsmodels.compat.python import range
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
def __init__(self, data, timesIn, groupIn, censoringIn):
raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
#store the inputs
self.data = data
self.timesIn = timesIn
self.groupIn = groupIn
self.censoringIn = censoringIn
def fit(self):
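        #Walk the sorted event times within each group and accumulate the
        #product-limit (Kaplan-Meier) estimate of the survival function along
        #with Greenwood's formula for its standard error; the counts, the
        #step-function plot points and the censoring markers are stored on the
        #instance for plot() and show_results().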
#split the data into groups based on the predicting variable
#get a set of all the groups
groups = list(set(self.data[:,self.groupIn]))
#create an empty list to store the data for different groups
groupList = []
#create an empty list for each group and add it to groups
for i in range(len(groups)):
groupList.append([])
#iterate through all the groups in groups
for i in range(len(groups)):
#iterate though the rows of dataArray
for j in range(len(self.data)):
#test if this row has the correct group
if self.data[j,self.groupIn] == groups[i]:
#add the row to groupList
groupList[i].append(self.data[j])
#create an empty list to store the times for each group
timeList = []
#iterate through all the groups
for i in range(len(groupList)):
#create an empty list
times = []
#iterate through all the rows of the group
for j in range(len(groupList[i])):
#get a list of all the times in the group
times.append(groupList[i][j][self.timesIn])
#get a sorted set of the times and store it in timeList
times = list(sorted(set(times)))
timeList.append(times)
#get a list of the number at risk and events at each time
#create an empty list to store the results in
timeCounts = []
#create an empty list to hold points for plotting
points = []
#create a list for points where censoring occurs
censoredPoints = []
        #iterate through each group
for i in range(len(groupList)):
#initialize a variable to estimate the survival function
survival = 1
#initialize a variable to estimate the variance of
#the survival function
varSum = 0
#initialize a counter for the number at risk
riskCounter = len(groupList[i])
#create a list for the counts for this group
counts = []
##create a list for points to plot
x = []
y = []
#iterate through the list of times
for j in range(len(timeList[i])):
if j != 0:
if j == 1:
#add an indicator to tell if the time
#starts a new group
groupInd = 1
#add (0,1) to the list of points
x.append(0)
y.append(1)
#add the point time to the right of that
x.append(timeList[i][j-1])
y.append(1)
#add the point below that at survival
x.append(timeList[i][j-1])
y.append(survival)
#add the survival to y
y.append(survival)
else:
groupInd = 0
#add survival twice to y
y.append(survival)
y.append(survival)
#add the time twice to x
x.append(timeList[i][j-1])
x.append(timeList[i][j-1])
#add each censored time, number of censorings and
#its survival to censoredPoints
censoredPoints.append([timeList[i][j-1],
censoringNum,survival,groupInd])
#add the count to the list
counts.append([timeList[i][j-1],riskCounter,
eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#increment the number at risk
riskCounter += -1*(riskChange)
#initialize a counter for the change in the number at risk
riskChange = 0
#initialize a counter to zero
eventCounter = 0
                #initialize a counter to tell when censoring occurs
censoringCounter = 0
censoringNum = 0
#iterate through the observations in each group
for k in range(len(groupList[i])):
#check of the observation has the given time
if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
#increment the number at risk counter
riskChange += 1
#check if this is an event or censoring
if groupList[i][k][self.censoringIn] == 1:
#add 1 to the counter
eventCounter += 1
else:
censoringNum += 1
#check if there are any events at this time
if eventCounter != censoringCounter:
censoringCounter = eventCounter
#calculate the estimate of the survival function
survival *= ((float(riskCounter) -
eventCounter)/(riskCounter))
try:
#calculate the estimate of the variance
varSum += (eventCounter)/((riskCounter)
*(float(riskCounter)-
eventCounter))
except ZeroDivisionError:
varSum = 0
#append the last row to counts
counts.append([timeList[i][len(timeList[i])-1],
riskCounter,eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#add the last time once to x
x.append(timeList[i][len(timeList[i])-1])
x.append(timeList[i][len(timeList[i])-1])
#add the last survival twice to y
y.append(survival)
#y.append(survival)
censoredPoints.append([timeList[i][len(timeList[i])-1],
censoringNum,survival,1])
            #add the list for the group to a list for all the groups
timeCounts.append(np.array(counts))
points.append([x,y])
#returns a list of arrays, where each array has as it columns: the time,
#the number at risk, the number of events, the estimated value of the
#survival function at that time, and the estimated standard error at
#that time, in that order
self.results = timeCounts
self.points = points
self.censoredPoints = censoredPoints
def plot(self):
x = []
#iterate through the groups
for i in range(len(self.points)):
#plot x and y
plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
#create lists of all the x and y values
x += self.points[i][0]
for j in range(len(self.censoredPoints)):
            #check if censoring is occurring
if (self.censoredPoints[j][1] != 0):
#if this is the first censored point
if (self.censoredPoints[j][3] == 1) and (j == 0):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this censored point starts a new group
elif ((self.censoredPoints[j][3] == 1) and
(self.censoredPoints[j-1][3] == 1)):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is the last censored point
elif j == (len(self.censoredPoints) - 1):
#calculate a distance beyond the previous time
#so that all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is a point in the middle of the group
else:
                    #calculate a distance beyond the current time
#to place the point, so they all fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j+1][0])
- self.censoredPoints[j][0]))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#set the size of the plot so it extends to the max x and above 1 for y
plt.xlim((0,np.max(x)))
plt.ylim((0,1.05))
#label the axes
plt.xlabel('time')
plt.ylabel('survival')
plt.show()
def show_results(self):
#start a string that will be a table of the results
resultsString = ''
#iterate through all the groups
for i in range(len(self.results)):
#label the group and header
resultsString += ('Group {0}\n\n'.format(i) +
'Time At Risk Events Survival Std. Err\n')
for j in self.results[i]:
#add the results to the string
resultsString += (
'{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
print(resultsString)
|
bsd-3-clause
|
kevin-intel/scikit-learn
|
sklearn/cluster/_affinity_propagation.py
|
2
|
17258
|
"""Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_random_state
from ..utils.deprecation import deprecated
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
from .._config import config_context
def _equal_similarities_and_preferences(S, preference):
def all_equal_preferences():
return np.all(preference == preference.flat[0])
def all_equal_similarities():
# Create mask to ignore diagonal of S
mask = np.ones(S.shape, dtype=bool)
np.fill_diagonal(mask, 0)
return np.all(S[mask].flat == S[mask].flat[0])
return all_equal_preferences() and all_equal_similarities()
def affinity_propagation(S, *, preference=None, convergence_iter=15,
max_iter=200, damping=0.5, copy=True, verbose=False,
return_n_iter=False, random_state=None):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like of shape (n_samples, n_samples)
Matrix of similarities between points.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, default=200
Maximum number of iterations
damping : float, default=0.5
Damping factor between 0.5 and 1.
copy : bool, default=True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency.
verbose : bool, default=False
The verbosity level.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Returns
-------
cluster_centers_indices : ndarray of shape (n_clusters,)
Index of clusters centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
When the algorithm does not converge, it returns an empty array as
    ``cluster_centers_indices`` and ``-1`` as label for each training sample.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
preference = np.array(preference)
if (n_samples == 1 or
_equal_similarities_and_preferences(S, preference)):
# It makes no sense to run the algorithm in this case, so return 1 or
# n_samples clusters, depending on preferences
warnings.warn("All samples have mutually equal similarities. "
"Returning arbitrary cluster center(s).")
if preference.flat[0] >= S.flat[n_samples - 1]:
return ((np.arange(n_samples), np.arange(n_samples), 0)
if return_n_iter
else (np.arange(n_samples), np.arange(n_samples)))
else:
return ((np.array([0]), np.array([0] * n_samples), 0)
if return_n_iter
else (np.array([0]), np.array([0] * n_samples)))
random_state = check_random_state(random_state)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
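    # Each iteration computes new responsibilities and availabilities from S,
    # A and R and applies the damped updates
    #   R <- damping * R + (1 - damping) * R_new
    #   A <- damping * A + (1 - damping) * A_new
    # in place, re-using `tmp` to avoid allocating extra matrices.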
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
never_converged = False
if verbose:
print("Converged after %d iterations." % it)
break
else:
never_converged = True
if verbose:
print("Did not converge")
I = np.flatnonzero(E)
K = I.size # Identify exemplars
if K > 0 and not never_converged:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
warnings.warn("Affinity propagation did not converge, this model "
"will not have any cluster centers.", ConvergenceWarning)
labels = np.array([-1] * n_samples)
cluster_centers_indices = []
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(ClusterMixin, BaseEstimator):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, default=0.5
Damping factor (between 0.5 and 1) is the extent to
which the current value is maintained relative to
        incoming values (weighted 1 - damping). This is in order
to avoid numerical oscillations when updating these
values (messages).
max_iter : int, default=200
Maximum number of iterations.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
copy : bool, default=True
Make a copy of input data.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
        of exemplars, i.e. of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : {'euclidean', 'precomputed'}, default='euclidean'
Which affinity to use. At the moment 'precomputed' and
``euclidean`` are supported. 'euclidean' uses the
negative squared euclidean distance between points.
verbose : bool, default=False
Whether to be verbose.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Attributes
----------
cluster_centers_indices_ : ndarray of shape (n_clusters,)
Indices of cluster centers.
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : ndarray of shape (n_samples,)
Labels of each point.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
When ``fit`` does not converge, ``cluster_centers_`` becomes an empty
array and all training samples will be labelled as ``-1``. In addition,
``predict`` will then label every sample as ``-1``.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, ``fit`` will result in
a single cluster center and label ``0`` for every sample. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
Examples
--------
>>> from sklearn.cluster import AffinityPropagation
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> clustering = AffinityPropagation(random_state=5).fit(X)
>>> clustering
AffinityPropagation(random_state=5)
>>> clustering.labels_
array([0, 0, 0, 1, 1, 1])
>>> clustering.predict([[0, 0], [4, 4]])
array([0, 1])
>>> clustering.cluster_centers_
array([[1, 2],
[4, 2]])
"""
def __init__(self, *, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False, random_state=None):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
self.random_state = random_state
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute _pairwise was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _more_tags(self):
return {'pairwise': self.affinity == 'precomputed'}
def fit(self, X, y=None):
"""Fit the clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
if self.affinity == "precomputed":
accept_sparse = False
else:
accept_sparse = 'csr'
X = self._validate_data(X, accept_sparse=accept_sparse)
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, preference=self.preference,
max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True,
random_state=self.random_state)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False, accept_sparse='csr')
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
if self.cluster_centers_.shape[0] > 0:
with config_context(assume_finite=True):
return pairwise_distances_argmin(X, self.cluster_centers_)
else:
warnings.warn("This model does not have any cluster centers "
"because affinity propagation did not converge. "
"Labeling every sample as '-1'.", ConvergenceWarning)
return np.array([-1] * X.shape[0])
def fit_predict(self, X, y=None):
"""Fit the clustering from features or affinity matrix, and return
cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
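# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of scikit-learn). The Notes section of
# the class docstring explains that when affinity propagation fails to
# converge, ``cluster_centers_`` is empty and every training sample is
# labelled ``-1``. The guarded block below is a minimal sketch of how a caller
# might detect that situation; it deliberately starves the solver of
# iterations, so a ConvergenceWarning is expected.
if __name__ == "__main__":
    import numpy as np
    from sklearn.cluster import AffinityPropagation
    X_demo = np.random.RandomState(0).rand(20, 2)
    # max_iter < convergence_iter, so the convergence criterion can never be met
    ap = AffinityPropagation(max_iter=2, random_state=0).fit(X_demo)
    if np.all(ap.labels_ == -1):
        print("did not converge; cluster_centers_ shape:", ap.cluster_centers_.shape)
    else:
        print("converged with", len(ap.cluster_centers_indices_), "clusters")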
|
bsd-3-clause
|
idlead/scikit-learn
|
examples/bicluster/plot_spectral_coclustering.py
|
276
|
1736
|
"""
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
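# A small follow-up sketch (not in the original example): the fitted model also
# exposes boolean indicator arrays ``rows_`` and ``columns_`` (one row per
# bicluster), which can be used to report each recovered bicluster's size.
for i in range(model.rows_.shape[0]):
    print("bicluster {}: {} rows x {} columns".format(
        i, int(model.rows_[i].sum()), int(model.columns_[i].sum())))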
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
|
bsd-3-clause
|
felipemontefuscolo/bitme
|
sim/__main__.py
|
1
|
33065
|
# NOTES:
# timestamps are of type pd.Timestamp
# side are of type str ('buy' or 'sell')
import argparse
import heapq
import os
import shutil
import sys
import time
from collections import defaultdict
from typing import List, Union, Dict
import numpy as np
import pandas as pd
from api import PositionInterface
from api.exchange_interface import ExchangeInterface
from api.symbol import Symbol
from common import OrderType, OrderContainerType, OrderCommon, OHLCV_COLUMNS, Fill, FillType, get_orders_id, \
OrderCancelReason, OrderStatus
from common.quote import Quote
from common.trade import Trade, TICK_DIRECTION
from sim.liquidator import Liquidator
from sim.position_sim import PositionSim
from tactic import TacticInterface
from tactic.TacticMakerV1 import TacticMakerV1
from tactic.TacticTakerV1 import TacticTakerV1
from tactic.tactic_tests.SimTacticLimitTest import SimTacticLimitTest
from tactic.tactic_tests.SimTacticMarketTest import SimTacticMarketTest
# logger = log.setup_custom_logger('root')
REQUEST_DELAY = pd.Timedelta('10ms')
WS_DELAY = pd.Timedelta('1ms')
ORDER_TO_FILL_DELAY = REQUEST_DELAY
CANCEL_DELAY = REQUEST_DELAY
CANCEL_NOTIF_DELAY = REQUEST_DELAY
# from autologging import logged
# TODO: prevent sending orders that would be liquidated immediately
# TODO: prevent sending orders if you have no funds (xbt_balance is not in use)
# import logging
ALL_TACTICS = {
SimTacticMarketTest.id(): SimTacticMarketTest,
SimTacticLimitTest.id(): SimTacticLimitTest,
TacticMakerV1.id(): TacticMakerV1,
TacticTakerV1.id(): TacticTakerV1
}
def get_args(input_args=None):
files_required = '--files' not in (input_args if input_args else sys.argv)
parser = argparse.ArgumentParser(description='Simulation')
parser.add_argument('--ohlcv-file', type=str, help='csv filename with candles data', required=files_required)
parser.add_argument('--trades-file', type=str, help='csv filename with trades data', required=files_required)
parser.add_argument('--quotes-file', type=str, help='csv filename with quotes data', required=files_required)
    parser.add_argument('--files', type=str, help='template path to the ohlcv, trades and quotes files (use %%TYPE%%)')  # %% keeps argparse help formatting happy
parser.add_argument('--tactics', type=str, help='comma-delimited list of tactics (id) to run', required=True)
parser.add_argument('-l', '--log-dir', type=str, help='log directory', required=True)
parser.add_argument('-b', '--begin', type=str, help='begin time')
parser.add_argument('-e', '--end', type=str, help='end time')
parser.add_argument('-x', '--pref', action='append', help='args for tactics, given in the format "key=value"')
args = parser.parse_args(args=input_args)
if not files_required:
args.ohlcv_file = args.files.replace('%TYPE%', 'ohlcv')
args.trades_file = args.files.replace('%TYPE%', 'trades')
args.quotes_file = args.files.replace('%TYPE%', 'quotes')
for f in [args.ohlcv_file, args.trades_file, args.quotes_file]:
if not os.path.isfile(f):
raise ValueError("invalid file {}".format(f))
if args.log_dir is not None:
if os.path.isfile(args.log_dir):
raise ValueError(args.log_dir + " is a file")
args.log_dir = os.path.abspath(args.log_dir)
if os.path.isdir(args.log_dir):
shutil.rmtree(args.log_dir)
os.makedirs(args.log_dir)
if args.begin:
args.begin = pd.Timestamp(args.begin)
if args.end:
args.end = pd.Timestamp(args.end)
if args.begin and args.end:
if args.begin >= args.end:
raise ValueError("begin time must be before end time")
if not args.pref:
args.pref = list()
for i in range(len(args.pref)):
args.pref[i] = args.pref[i].split("=")
args.pref = dict(args.pref)
tactics = args.tactics.split(',')
for tactic in tactics:
        if tactic not in ALL_TACTICS.keys():
            raise ValueError("Unknown tactic {}".format(tactic))
args.tactics = [ALL_TACTICS[t] for t in tactics]
return args
# only BTC is supported for now
# @logged
class SimExchangeBitMex(ExchangeInterface):
FEE = {OrderType.Limit: -0.00025, OrderType.Market: 0.00075}
SYMBOLS = list(Symbol)
# reference: https://www.bitmex.com/app/riskLimits#instrument-risk-limits
RISK_LIMITS = {Symbol.XBTUSD: 0.0015}
def __init__(self,
begin_timestamp: pd.Timestamp,
end_timestamp: pd.Timestamp,
initial_balance: float,
ohlcv: pd.DataFrame,
trades: pd.DataFrame,
quotes: pd.DataFrame,
log_dir: str,
tactics: List[TacticInterface],
tactic_prefs: dict):
ExchangeInterface.__init__(self)
self.finished = False
self.sim_start_time = False
self.xbt_initial_balance = initial_balance
self.xbt_balance = initial_balance
self.log_dir = log_dir
self.tactic_prefs = tactic_prefs
assert ohlcv is not None and len(ohlcv) > 0
assert trades is not None and len(trades) > 0
assert quotes is not None and len(quotes) > 0
self.ohlcv = ohlcv # type: pd.DataFrame
self.trades = trades # type: pd.DataFrame
self.quotes = quotes # type: pd.DataFrame
self.begin_timestamp = begin_timestamp
self.end_timestamp = end_timestamp
if self.begin_timestamp is None:
self.begin_timestamp = min(self.ohlcv.index[0], self.trades.index[0], self.quotes.index[0])
if self.end_timestamp is None:
self.end_timestamp = max(self.ohlcv.index[-1], self.trades.index[-1], self.quotes.index[-1])
self.ohlcv_idx = 0
self.trade_idx = 0
self.quote_idx = 0
if self.end_timestamp <= self.begin_timestamp:
            raise ValueError(
                "end_timestamp ({}) is less than or equal to begin_timestamp ({}). Please check arguments "
                "and make sure the first market data event occurs before the end time".format(
self.end_timestamp, self.begin_timestamp
))
self.current_timestamp = self.begin_timestamp # type: pd.Timestamp
# market data tick: quotes, trades, ohlcv
self.next_tick_ts = self.begin_timestamp # type: pd.Timestamp
self.liquidator_tactic = Liquidator()
self.tactics_map = {t.id(): t for t in [self.liquidator_tactic] + tactics} # type: Dict[str, TacticInterface]
ss = [tac.get_symbol() for tac in tactics]
zz = set(ss)
if len(zz) != len(ss):
            raise ValueError("Tactics trading the same symbol are not allowed.")
self.volume = {s: 0. for s in self.SYMBOLS} # type: Dict[Symbol, float]
self.n_fills = {s: 0 for s in self.SYMBOLS} # type: Dict[Symbol, int]
self.n_orders = {s: 0 for s in self.SYMBOLS} # type: Dict[Symbol, int]
self.n_unsolicited_cancels = {s: 0 for s in self.SYMBOLS} # type: Dict[Symbol, int]
self.n_liquidations = {s: 0 for s in self.SYMBOLS} # type: Dict[Symbol, int]
self.cum_pnl = {s: 0. for s in self.SYMBOLS} # type: Dict[Symbol, float]
self.pnl_history = defaultdict(list) # type: Dict[Symbol, List[float]]
self.positions = {s: PositionSim(s, self._log_and_update_pnl) for s in
self.SYMBOLS} # type: Dict[Symbol, PositionSim]
self.leverage = {s: 1.0 for s in self.SYMBOLS} # type: Dict[Symbol, float]
self.queue = [] # queue of events. See self._queue_append
self.event_num = 0
self.active_orders = dict() # type: OrderContainerType
self.current_quote = None # type: Quote
def _init_files(self, log_dir):
self.fills_file = open(os.path.join(log_dir, 'fills.csv'), 'w')
self.orders_file = open(os.path.join(log_dir, 'orders.csv'), 'w')
self.pnl_file = open(os.path.join(log_dir, 'pnl.csv'), 'w')
self.fills_file.write(Fill.get_header() + '\n')
self.orders_file.write(OrderCommon.get_header() + '\n')
self.pnl_file.write('timestamp,symbol,pnl,cum_pnl\n')
def _init_tactics(self):
for tac in self.tactics_map.values():
tac.initialize(self, self.tactic_prefs)
tac.is_live = False
def _close_files(self):
assert self.finished
self.fills_file.close()
self.orders_file.close()
self.pnl_file.close()
def _log_fill(self, fill: Fill):
self.fills_file.write(fill.to_line() + '\n')
def _log_order(self, order: OrderCommon):
self.orders_file.write(order.to_line() + '\n')
def _log_and_update_pnl(self, position: PositionSim):
timestamp = self.current_timestamp
pnl = position.realized_pnl
assert not np.isnan(pnl)
symbol = position.symbol
self.pnl_history[symbol].append(pnl)
self.cum_pnl[symbol] += pnl
self.pnl_file.write(','.join([str(timestamp.strftime('%Y-%m-%dT%H:%M:%S')),
symbol.name,
str(pnl),
str(self.cum_pnl[symbol])])
+ '\n')
def __enter__(self):
self.start_main_loop()
return self
def __exit__(self, type, value, traceback):
self.end_main_loop()
pass
def run_sim(self):
self.start_main_loop()
self.end_main_loop()
def start_main_loop(self):
if self.sim_start_time:
            raise AttributeError('Simulation already started')
self.sim_start_time = time.time()
self._init_files(self.log_dir)
self._init_tactics()
tactics_names = [i.__class__.__name__ for i in self.tactics_map.values()]
tactics_names = [i for i in tactics_names if i != Liquidator.__name__]
print("Tactics running: {}".format(tactics_names))
print("Market data tick range: {} - {}".format(self.begin_timestamp, self.end_timestamp))
while not self.finished:
self._advance_tick_timestamp()
self._process_queue_until(self.next_tick_ts)
self._process_queue_until(execute_all=True)
def end_main_loop(self):
for symbol in list(Symbol):
self._exec_liquidation(symbol, reason=OrderCancelReason.end_of_sim)
for tac in self.tactics_map.values():
tac.finalize()
self._close_files()
profit_part = defaultdict(float)
loss_part = defaultdict(float)
for s in self.pnl_history.keys():
for p in self.pnl_history[s]:
if p >= 0:
profit_part[s] += p
else:
loss_part[s] += p
summary = {'sim time': time.time() - self.sim_start_time,
'initial_xbt': self.xbt_initial_balance,
'position_xbt': 'Not implemented',
'num_fills': self.n_fills,
'volume': self.volume,
'num_orders': self.n_orders,
'n_unsolicited_cancels': self.n_unsolicited_cancels,
'num_liq': self.n_liquidations,
'close_price': self.current_quote.w_mid(),
'profit_part': profit_part,
'loss_part': loss_part,
'pnl (XBT)': self.cum_pnl,
'pnl_total': sum([p for p in self.cum_pnl.values()])
}
for k, v in summary.items():
print('{}: {}'.format(k, self._transf(v, len(k))))
@staticmethod
def _transf(d, l):
if isinstance(d, dict) or isinstance(d, defaultdict):
s = []
l += 2
for k, v in d.items():
s += ["{}: {}".format(str(k), v)]
return ('\n' + ' ' * l).join(s)
else:
return d
def _process_queue_until(self, end_inclusive: pd.Timestamp = None, execute_all=False):
assert (end_inclusive is not None) ^ execute_all # XOR
while len(self.queue) > 0 and (execute_all or self.queue[0][0] <= end_inclusive):
task = heapq.heappop(self.queue)
if self.current_timestamp > task[0]:
raise AttributeError("Going back in time")
self.current_timestamp = task[0]
method = task[2]
args = task[3] # if we ever need to change the number of arguments to more than one, you can use *args
# print(self.current_timestamp, method)
method(args)
if end_inclusive:
self.print_progress(self.current_timestamp)
else:
self.print_progress(self.end_timestamp)
def _queue_append(self, timestamp, method, method_argument):
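        # DEV NOTE: the heap orders events by (timestamp, event_num); event_num is a
        # monotonically increasing tie-breaker, so events queued for the same
        # timestamp are processed in FIFO order and methods are never compared.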
tuple_ = (timestamp, self.event_num, method, method_argument)
self.event_num += 1
heapq.heappush(self.queue, tuple_)
def _advance_tick_timestamp(self):
# DEV NOTE: advance time stamp and put market data in the queue
def get_table_ts_at_idx(table, idx):
if idx >= len(table):
return self.end_timestamp
return table.index[idx]
next_trade_ts = get_table_ts_at_idx(self.trades, self.trade_idx)
next_quote_ts = get_table_ts_at_idx(self.quotes, self.quote_idx)
next_ohlcv_ts = get_table_ts_at_idx(self.ohlcv, self.ohlcv_idx)
next_tick_ts = min(next_trade_ts, next_quote_ts, next_ohlcv_ts)
if next_tick_ts >= self.end_timestamp:
self.finished = True
return
quote_row = self.quotes.iloc[self.quote_idx]
self.current_quote = Quote(symbol=Symbol[quote_row['symbol']],
timestamp=next_quote_ts,
bid_size=quote_row['bidSize'],
bid_price=quote_row['bidPrice'],
ask_size=quote_row['askSize'],
ask_price=quote_row['askPrice'])
def add_data_to_queue_until(end_ts_inclusive, method, table, idx):
while idx < len(table) and table.index[idx] <= end_ts_inclusive:
self._queue_append(table.index[idx], method, table.iloc[idx])
idx += 1
return idx
self.trade_idx = add_data_to_queue_until(next_tick_ts, self._process_trade, self.trades, self.trade_idx)
self.quote_idx = add_data_to_queue_until(next_tick_ts, self._process_quote, self.quotes, self.quote_idx)
self.ohlcv_idx = add_data_to_queue_until(next_tick_ts, self._process_ohlcv, self.ohlcv, self.ohlcv_idx)
self.next_tick_ts = next_tick_ts
return
def _exec_liquidation(self, symbol: Symbol, reason=OrderCancelReason.liquidation):
orders_to_cancel = [o for o in self.active_orders.values() if o.symbol == symbol]
for order in orders_to_cancel:
order.status_msg = reason
self._exec_order_cancels(orders_to_cancel)
if reason == OrderCancelReason.liquidation:
self.n_liquidations[symbol] += 1
if self.positions[symbol].is_open:
order = OrderCommon(symbol=symbol,
type=OrderType.Market,
client_id=self.liquidator_tactic.gen_order_id(),
signed_qty=-self.positions[symbol].signed_qty
)
self._exec_market_order(order)
return
def _exec_order_cancels(self, orders: List[OrderCommon]):
"""
DEV NOTE: important! the cancel reasons should be set directly in the order!
DEV NOTE: important! this method should support cancel orders that are not in self.active_orders yet
:param orders:
:return:
"""
for o in orders:
if o.status_msg is None:
raise AttributeError("Order cancel must have a reason")
ids_to_delete = {o.client_id for o in orders}
self.active_orders = {oid: self.active_orders[oid] for oid in self.active_orders.keys()
if oid not in ids_to_delete}
for order in orders:
if order.status_msg == OrderCancelReason.invalid_price:
raise AttributeError('We should always have valid prices')
should_notify_tactic = order.status_msg != OrderCancelReason.requested_by_user and \
order.status_msg != OrderCancelReason.end_of_sim
if should_notify_tactic:
method = self._get_tactic(order.client_id).handle_cancel
self._queue_append(self.current_timestamp + CANCEL_NOTIF_DELAY, method, order)
self.n_unsolicited_cancels[order.symbol] += 1
def _get_tactic(self, order_client_id: str) -> TacticInterface:
tactic_id = order_client_id.split('_')[0]
return self.tactics_map[tactic_id]
def _exec_market_order(self, order: OrderCommon):
assert order.type == OrderType.Market
assert order.status == OrderStatus.Pending
order.status = OrderStatus.New
tick_size = order.symbol.tick_size
# TODO: watch for self-cross
# We only fill at two price levels: best price and second best price
if order.side() > 0:
best_price = self.current_quote.ask_price
second_best = best_price + tick_size
size = self.current_quote.ask_size
else:
best_price = self.current_quote.bid_price
second_best = best_price - tick_size
size = self.current_quote.bid_size
qty_fill1 = min(size, order.leaves_qty)
qty_fill2 = order.leaves_qty - qty_fill1
fully_filled = self._fill_order(order=order, qty_to_fill=qty_fill1, price_fill=best_price)
if not fully_filled:
assert qty_fill2 > 0
self._fill_order(order=order, qty_to_fill=qty_fill2, price_fill=second_best)
return
def _exec_limit_order(self, order: OrderCommon):
assert order.type == OrderType.Limit
bid = self.current_quote.bid_price
ask = self.current_quote.ask_price
order_side = order.side()
self._check_price_sanity(order)
violated_post_only = order_side > 0 and order.price >= ask or order_side < 0 and order.price <= bid
if violated_post_only:
order.status = OrderStatus.Canceled
order.status_msg = OrderCancelReason.cross_during_post_only
self._exec_order_cancels([order])
return
order.status = OrderStatus.New
self.active_orders[order.client_id] = order
order._made_spread = order_side > 0 and order.price > bid or order_side < 0 and order.price < ask
def _check_price_sanity(self, order: OrderCommon):
if not self.is_price_sane(order.price, order.symbol):
raise ValueError('Order with invalid price. {}'.format(order))
@staticmethod
def is_price_sane(price: float, symbol: Symbol = Symbol.XBTUSD):
if symbol == Symbol.XBTUSD:
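            # A sane XBTUSD price is a multiple of 0.5: twice the price must be
            # an integer (within a small numerical tolerance).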
two = price + price
return abs(int(two) - two) < 2e-10
# TODO: add sanity check for other symbols
return True
def _process_trade(self, trade: pd.Series):
# this method fills against limit orders
# trade expected fields: ['timestamp', 'symbol', 'side', 'price', 'size', 'tickDirection']
side_str = trade['side']
trade_side = +1 if side_str[0] == 'B' else -1
trade_price = trade['price']
trade_qty = trade['size'] # unsigned
trade_sym = Symbol[trade['symbol']]
# This price is the price that would happen if we could insert the orders price in the real book
sim_price = trade_price
for order in self.active_orders.values(): # type: OrderCommon
if order.type != OrderType.Limit:
continue
assert order.is_open()
order_side = order.side()
cross = order_side != trade_side and trade_price * trade_side >= order.price * trade_side
if cross:
if order._made_spread:
                    # _made_spread means that the order was posted at a better price than bid/ask
# liquidity from order has higher priority than the liquidity in the book
fill_qty = min(trade_qty, order.leaves_qty)
else:
if order_side > 0:
book_price = self.current_quote.bid_price
book_size = self.current_quote.bid_size
else:
book_price = self.current_quote.ask_price
book_size = self.current_quote.ask_size
is_quote_better_price = book_price * order_side >= order.price * order_side
if is_quote_better_price:
fill_qty = min(max(trade_qty - book_size, 0), order.leaves_qty)
else:
fill_qty = min(trade_qty, order.leaves_qty)
if fill_qty > 0:
self._fill_order(order=order,
qty_to_fill=fill_qty,
price_fill=order.price)
if trade_side > 0:
sim_price = min(sim_price, order.price)
else:
sim_price = max(sim_price, order.price)
self.active_orders = {o.client_id: o for o in self.active_orders.values() if o.is_open()}
for tac in self.tactics_map.values():
if tac.get_symbol() != trade_sym:
continue
            # Use a distinct name so the incoming ``trade`` Series is not shadowed
            # inside the loop over tactics.
            sim_trade = Trade(symbol=trade_sym,
                              timestamp=self.current_timestamp,
                              side=trade_side,
                              price=sim_price,
                              size=trade_qty,
                              tick_direction=TICK_DIRECTION[trade['tickDirection']])
            self._queue_append(self.current_timestamp + WS_DELAY, tac.handle_trade, sim_trade)
return
def _process_quote(self, quote: pd.Series):
# quote expected columns: ['timestamp', 'symbol', 'bidSize', 'bidPrice', 'askPrice', 'askSize']
bid = quote['bidPrice']
ask = quote['askPrice']
symbol = Symbol[quote['symbol']]
if len(self.active_orders) > 0:
for order in self.active_orders.values(): # type: OrderCommon
if order.type == OrderType.Limit:
order_side = order.side()
if (order_side > 0 and ask <= order.price) or (order_side < 0 and bid >= order.price):
self._fill_order(order=order,
qty_to_fill=order.leaves_qty,
price_fill=order.price)
self.active_orders = {o.client_id: o for o in self.active_orders.values() if o.is_open()}
for tac in self.tactics_map.values():
if tac.get_symbol() != symbol:
continue
q = Quote(symbol=symbol,
timestamp=self.current_time(),
bid_size=quote['bidSize'],
bid_price=quote['bidPrice'],
ask_size=quote['askSize'],
ask_price=quote['askPrice'])
self._queue_append(self.current_timestamp + WS_DELAY, tac.handle_quote, q)
return
def _process_ohlcv(self, ohlcv: pd.Series):
# ohlcv expect columns: timestamp, symbol, open, high, low, close, size
ohlcv_view = self.get_candles1m()
for tactic in self.tactics_map.values():
method = tactic.handle_1m_candles
self._queue_append(self.current_timestamp + WS_DELAY, method, ohlcv_view)
def _fill_order(self, order: OrderCommon, qty_to_fill, price_fill, fee=0.):
assert price_fill >= 0, "Price has to be positive"
assert abs(order.leaves_qty) >= 1, "Order with leaves_qty < 1 should be closed"
assert abs(order.signed_qty) >= 1, "Order signed_qty should not be less than 1"
assert abs(qty_to_fill) >= 1, "Can not fill order with qty less than 1"
fully_filled = order.fill(qty_to_fill)
side = order.side()
fill = Fill(symbol=order.symbol,
side=order.side_str(),
qty_filled=qty_to_fill,
price_fill=price_fill,
fill_time=self.current_timestamp,
fill_type=FillType.complete if fully_filled else FillType.partial,
order_id=order.client_id)
self._log_fill(fill)
self.volume[order.symbol] += abs(qty_to_fill) * price_fill
self.n_fills[order.symbol] += 1
position = self.positions[order.symbol]
position.update(signed_qty=qty_to_fill * side,
price=price_fill,
leverage=self.leverage[order.symbol],
current_timestamp=self.current_timestamp,
fee=fee)
method = self._get_tactic(order.client_id).handle_fill
self._queue_append(self.current_timestamp + WS_DELAY, method, fill)
return fully_filled
def get_candles1m(self) -> pd.DataFrame:
return self.ohlcv.iloc[:max(self.ohlcv_idx, 1)]
def get_opened_orders(self, symbol: Symbol, client_id_prefix: str) -> OrderContainerType:
return {o.client_id: o for o in self.active_orders.values()
if o.symbol == symbol and client_id_prefix == o.client_id[0:len(client_id_prefix)]}
def send_orders(self, orders: List[OrderCommon]):
for o in orders:
if o.status != OrderStatus.Pending:
raise ValueError("Sending order with status different from Pending: {}".format(o))
if abs(o.signed_qty) < 1:
raise ValueError("Order abs(quantity) has to be greater than 1, found {}".format(o.signed_qty))
self.n_orders[o.symbol] += 1
self._queue_append(self.current_timestamp + ORDER_TO_FILL_DELAY, self._send_orders_impl, orders)
def _send_orders_impl(self, orders: List[OrderCommon]):
for o in orders:
o.time_posted = self.current_timestamp
if o.type == OrderType.Market:
self._exec_market_order(o)
elif o.type == OrderType.Limit:
self._exec_limit_order(o)
else:
raise NotImplementedError()
self._log_order(o)
pass
def cancel_orders(self, orders: Union[OrderContainerType, List[OrderCommon], List[str]]):
self._queue_append(self.current_timestamp + CANCEL_DELAY, self._cancel_orders_impl, orders)
def cancel_all_orders(self, symbol: Symbol):
orders = [o for o in self.active_orders.values() if o.symbol == symbol]
if orders:
self._queue_append(self.current_timestamp + CANCEL_DELAY, self._cancel_orders_impl, orders)
def close_position(self, symbol: Symbol):
# TODO: it should also close limit orders on the same side as the position
pos = self.positions[symbol]
oid = self.liquidator_tactic.gen_order_id().split('_')
oid = oid[0] + '_closing_' + oid[1]
if pos.is_open:
self.send_orders([OrderCommon(symbol=symbol,
type=OrderType.Market,
client_id=oid,
signed_qty=-pos.signed_qty)])
def _cancel_orders_impl(self, orders: Union[OrderContainerType, List[OrderCommon], List[str]]):
# The reason to split this function from cancel_orders is to simulate the delay in the cancels
ids = get_orders_id(orders)
for i in ids:
order = self.active_orders.get(i)
if order:
order.status = OrderStatus.Canceled
order.status_msg = OrderCancelReason.requested_by_user
self._exec_order_cancels([order])
else:
# raise AttributeError("Invalid order id {}. Opened orders are: {}".format(i, [k for k in self.active_orders]))
pass
def current_time(self) -> pd.Timestamp:
return self.current_timestamp
def get_quote(self, symbol: Symbol) -> Quote:
return self.current_quote
def get_position(self, symbol: Symbol) -> PositionInterface:
return self.positions[symbol]
def get_pnl_history(self, symbol: Symbol) -> List[float]:
return self.pnl_history[symbol]
def set_leverage(self, symbol: Symbol, leverage: float):
if not (0.01 <= leverage <= 100.):
raise ValueError("Invalid leverage {}, allowed range is 0.01 <= leverage <= 100".format(leverage))
self.leverage[symbol] = leverage
def get_balance_xbt(self) -> float:
return self.xbt_balance
def is_open(self):
return not self.finished
def print_progress(self, current_ts: pd.Timestamp):
progress = min(1.0, (current_ts - self.begin_timestamp) / (self.end_timestamp - self.begin_timestamp))
if progress < 1.:
sys.stdout.write("progress: %.4f%% \r" % (100 * progress))
else:
print("progress: 100% ")
sys.stdout.flush()
def read_timeseries(filename: str, cols: list, begin: pd.Timestamp, end: pd.Timestamp, end_inclusive: bool = False):
table = pd.read_csv(filename)
table.set_index('timestamp', inplace=True)
table = table[cols]
table.index = pd.DatetimeIndex(table.index)
if end is None:
if end_inclusive is True:
raise ValueError("If 'end_inclusive' is set, 'end' must be specified (not None)")
else:
end_inclusive = True
md_range = (table.index[0], table.index[-1])
begin = begin if begin else table.index[0]
end = end if end else table.index[-1]
if end_inclusive:
table = table[(table.index >= begin) & (table.index <= end)]
else:
table = table[(table.index >= begin) & (table.index < end)]
if len(table) == 0:
        raise ValueError("Specified begin {} and end time {} produce empty market data (range {})".format(
begin, end, md_range
))
if abs(table.index[0] - begin) > pd.Timedelta('1 min'):
        raise ValueError('Provided begin_timestamp is too early for the market data (more than 1 min of inactivity)')
if abs(table.index[-1] - end) > pd.Timedelta('1 min'):
        raise ValueError('Provided end_timestamp is too late for the market data (more than 1 min of inactivity)')
return table
def read_market_data(args):
start = time.time()
ohlcv = read_timeseries(filename=args.ohlcv_file,
cols=['symbol', 'open', 'high', 'low', 'close', 'size'],
begin=args.begin,
end=args.end)
print('ohlcv : n_rows={}, time reading: {}s'.format(len(ohlcv), '%.2f' % (time.time() - start)))
start = time.time()
trades = read_timeseries(filename=args.trades_file,
cols=['symbol', 'side', 'price', 'size', 'tickDirection'],
begin=args.begin,
end=args.end)
print('trades: n_rows={}, time reading: {}s'.format(len(trades), '%.2f' % (time.time() - start)))
start = time.time()
quotes = read_timeseries(filename=args.quotes_file,
cols=['symbol', 'bidSize', 'bidPrice', 'askPrice', 'askSize'],
begin=args.begin,
end=args.end)
print('quotes: n_rows={}, time reading: {}s'.format(len(quotes), '%.2f' % (time.time() - start)))
return ohlcv, trades, quotes
def test_all():
pass
# @logged
def main(input_args=None):
# logging.basicConfig(
# filename='messages',
# filemode='w',
# level=logging.INFO,
# format='%(levelname)s:%(name)s:%(funcName)s:%(message)s '
# )
# main._log.info("starting SIM")
args = get_args(input_args)
ohlcv, trades, quotes = read_market_data(args)
tactics = [t() for t in args.tactics]
sim = SimExchangeBitMex(args.begin,
args.end,
initial_balance=1,
ohlcv=ohlcv,
trades=trades,
quotes=quotes,
log_dir=args.log_dir,
tactics=tactics,
tactic_prefs=args.pref)
sim.run_sim()
# needed for show.py
pars_used_file = open(os.path.join(args.log_dir, 'parameters_used'), 'w')
pars_used_file.write(','.join(input_args if input_args else sys.argv))
    pars_used_file.write("\n")
pars_used_file.close()
return 0
if __name__ == "__main__":
sys.exit(main())
|
mpl-2.0
|
shyamalschandra/scikit-learn
|
sklearn/feature_extraction/hashing.py
|
41
|
6175
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.bool_ or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
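# ---------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): the ``input_type`` values
# documented above also accept "string" and "pair" inputs. The guarded block
# below shows both forms; it relies only on the public FeatureHasher API.
if __name__ == "__main__":
    from sklearn.feature_extraction import FeatureHasher
    # "string": each sample is a sequence of feature names; a value of 1 is implied.
    h_str = FeatureHasher(n_features=8, input_type="string")
    X_str = h_str.transform([["dog", "cat", "cat"], ["run", "dog"]])
    print(X_str.shape, X_str.nnz)
    # "pair": each sample is a sequence of (feature_name, value) pairs.
    h_pair = FeatureHasher(n_features=8, input_type="pair")
    X_pair = h_pair.transform([[("dog", 1), ("cat", 2)], [("run", 5)]])
    print(X_pair.toarray())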
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/tests/reductions/test_stat_reductions.py
|
1
|
9418
|
"""
Tests for statistical reductions of 2nd moment or higher: var, skew, kurt, ...
"""
import inspect
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
import pandas.util.testing as tm
class TestDatetimeLikeStatReductions:
@pytest.mark.parametrize('box', [Series, pd.Index, DatetimeArray])
def test_dt64_mean(self, tz_naive_fixture, box):
tz = tz_naive_fixture
dti = pd.date_range('2001-01-01', periods=11, tz=tz)
# shuffle so that we are not just working with monotone-increasing
dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6])
dtarr = dti._data
obj = box(dtarr)
assert obj.mean() == pd.Timestamp('2001-01-06', tz=tz)
assert obj.mean(skipna=False) == pd.Timestamp('2001-01-06', tz=tz)
        # dtarr[-2] will be the first date, 2001-01-01
dtarr[-2] = pd.NaT
obj = box(dtarr)
assert obj.mean() == pd.Timestamp('2001-01-06 07:12:00', tz=tz)
assert obj.mean(skipna=False) is pd.NaT
@pytest.mark.parametrize('box', [Series, pd.Index, PeriodArray])
def test_period_mean(self, box):
# GH#24757
dti = pd.date_range('2001-01-01', periods=11)
# shuffle so that we are not just working with monotone-increasing
dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6])
# use hourly frequency to avoid rounding errors in expected results
# TODO: flesh this out with different frequencies
parr = dti._data.to_period('H')
obj = box(parr)
with pytest.raises(TypeError, match="ambiguous"):
obj.mean()
with pytest.raises(TypeError, match="ambiguous"):
obj.mean(skipna=True)
        # parr[-2] will be the first date, 2001-01-01
parr[-2] = pd.NaT
with pytest.raises(TypeError, match="ambiguous"):
obj.mean()
with pytest.raises(TypeError, match="ambiguous"):
obj.mean(skipna=True)
@pytest.mark.parametrize('box', [Series, pd.Index, TimedeltaArray])
def test_td64_mean(self, box):
tdi = pd.TimedeltaIndex([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4],
unit='D')
tdarr = tdi._data
obj = box(tdarr)
result = obj.mean()
expected = np.array(tdarr).mean()
assert result == expected
tdarr[0] = pd.NaT
assert obj.mean(skipna=False) is pd.NaT
result2 = obj.mean(skipna=True)
assert result2 == tdi[1:].mean()
# exact equality fails by 1 nanosecond
assert result2.round('us') == (result * 11. / 10).round('us')
class TestSeriesStatReductions:
# Note: the name TestSeriesStatReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
def _check_stat_op(self, name, alternate, string_series_,
check_objects=False, check_allna=False):
with pd.option_context('use_bottleneck', False):
f = getattr(Series, name)
# add some NaNs
string_series_[5:15] = np.NaN
# mean, idxmax, idxmin, min, and max are valid for dates
if name not in ['max', 'min', 'mean']:
ds = Series(pd.date_range('1/1/2001', periods=10))
with pytest.raises(TypeError):
f(ds)
# skipna or no
assert pd.notna(f(string_series_))
assert pd.isna(f(string_series_, skipna=False))
# check the result is correct
nona = string_series_.dropna()
tm.assert_almost_equal(f(nona), alternate(nona.values))
tm.assert_almost_equal(f(string_series_), alternate(nona.values))
allna = string_series_ * np.nan
if check_allna:
assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# GH#2888
items = [0]
items.extend(range(2 ** 40, 2 ** 40 + 1000))
s = Series(items, dtype='int64')
tm.assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(pd.bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
assert res == exp
# check on string data
if name not in ['sum', 'min', 'max']:
with pytest.raises(TypeError):
f(Series(list('abc')))
# Invalid axis.
with pytest.raises(ValueError):
f(string_series_, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in inspect.getfullargspec(f).args:
with pytest.raises(NotImplementedError, match=name):
f(string_series_, numeric_only=True)
def test_sum(self):
string_series = tm.makeStringSeries().rename('series')
self._check_stat_op('sum', np.sum, string_series, check_allna=False)
def test_mean(self):
string_series = tm.makeStringSeries().rename('series')
self._check_stat_op('mean', np.mean, string_series)
def test_median(self):
string_series = tm.makeStringSeries().rename('series')
self._check_stat_op('median', np.median, string_series)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=range(10))
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
def test_prod(self):
string_series = tm.makeStringSeries().rename('series')
self._check_stat_op('prod', np.prod, string_series)
def test_min(self):
string_series = tm.makeStringSeries().rename('series')
self._check_stat_op('min', np.min, string_series, check_objects=True)
def test_max(self):
string_series = tm.makeStringSeries().rename('series')
self._check_stat_op('max', np.max, string_series, check_objects=True)
def test_var_std(self):
string_series = tm.makeStringSeries().rename('series')
datetime_series = tm.makeTimeSeries().rename('ts')
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt, string_series)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt, string_series)
result = datetime_series.std(ddof=4)
expected = np.std(datetime_series.values, ddof=4)
tm.assert_almost_equal(result, expected)
result = datetime_series.var(ddof=4)
expected = np.var(datetime_series.values, ddof=4)
tm.assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = datetime_series.iloc[[0]]
result = s.var(ddof=1)
assert pd.isna(result)
result = s.std(ddof=1)
assert pd.isna(result)
def test_sem(self):
string_series = tm.makeStringSeries().rename('series')
datetime_series = tm.makeTimeSeries().rename('ts')
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt, string_series)
result = datetime_series.sem(ddof=4)
expected = np.std(datetime_series.values,
ddof=4) / np.sqrt(len(datetime_series.values))
tm.assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = datetime_series.iloc[[0]]
result = s.sem(ddof=1)
assert pd.isna(result)
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
string_series = tm.makeStringSeries().rename('series')
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt, string_series)
# test corner cases, skew() returns NaN unless there's at least 3
# values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.skew())
assert np.isnan(df.skew()).all()
else:
assert 0 == s.skew()
assert (df.skew() == 0).all()
@td.skip_if_no_scipy
def test_kurt(self):
from scipy.stats import kurtosis
string_series = tm.makeStringSeries().rename('series')
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt, string_series)
index = pd.MultiIndex(
levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]]
)
s = Series(np.random.randn(6), index=index)
tm.assert_almost_equal(s.kurt(), s.kurt(level=0)['bar'])
# test corner cases, kurt() returns NaN unless there's at least 4
# values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.kurt())
assert np.isnan(df.kurt()).all()
else:
assert 0 == s.kurt()
assert (df.kurt() == 0).all()
|
bsd-3-clause
|
jalexvig/tensorflow
|
tensorflow/examples/learn/text_classification_character_rnn.py
|
38
|
4036
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat of an alternative to the Lua code here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
"""Character level recurrent neural network model to predict classes."""
byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
byte_list = tf.unstack(byte_vectors, axis=1)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = tf.estimator.Estimator(model_fn=char_rnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Eval.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
zonca/healpy
|
healpy/__init__.py
|
1
|
3423
|
#
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
"""HealPy is a package to manipulate Healpix maps (ang2pix, pix2ang) and
compute spherical harmonic transforms on them.
"""
import warnings
try:
ImportWarning
except NameError:
class ImportWarning(Warning):
pass
from version import __version__
from pixelfunc import (ma, mask_good, mask_bad,
ang2pix, pix2ang,
pix2vec, vec2pix,
vec2ang, ang2vec,
nside2npix, npix2nside,
isnsideok, isnpixok,
ring2nest, nest2ring, reorder,
get_neighbours, get_all_neighbours, max_pixrad, get_interp_val,
fit_dipole, fit_monopole,
remove_dipole, remove_monopole,
get_nside, maptype, ud_grade, nside2resol, nside2pixarea,
get_map_size)
from sphtfunc import (anafast, map2alm,
alm2map, Alm, synalm, synfast,
smoothing, smoothalm, almxfl, alm2cl,
pixwin, alm2map_der1, gauss_beam)
from _sphtools import rotate_alm
try:
from _query_disc import query_disc, query_strip, query_polygon, boundaries
except ImportError:
warnings.warn('Warning: cannot import query disc module')
try:
from _pixelfunc import ringinfo, pix2ring
except ImportError:
warnings.warn('Warning: cannot import pixelfunc module')
from rotator import Rotator, vec2dir, dir2vec
try:
from _healpy_pixel_lib import UNSEEN
except ImportError:
warnings.warn('Warning: cannot import pixel lib module')
try:
from pshyt import job
from pshyt import *
except ImportError:
    warnings.warn("Warning: Cannot import pshyt module",
category=ImportWarning)
try:
from visufunc import (mollview,graticule,delgraticules,gnomview,
projplot,projscatter, projtext, cartview, orthview)
from zoomtool import mollzoom,set_g_clim
    if visufunc.matplotlib.__version__ == '0.98.3':
warnings.warn("Bug in matplotlib 0.98.3 prevents mollview from working\n"+
"You should upgrade to matplotlib 0.98.4 or above",
category=ImportWarning)
except ImportError:
warnings.warn("Warning: Cannot import visualisation tools (needs matplotlib)",
category=ImportWarning)
try:
from fitsfunc import write_map,read_map,mrdfits,mwrfits,read_alm,write_alm,write_cl,read_cl
except:
warnings.warn("Warning: Cannot import fits i/o tools (needs pyfits)",
category=ImportWarning)
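# ---------------------------------------------------------------------------
# Illustrative sketch (not part of healpy): a basic pixel <-> angle round trip
# with the pixelisation helpers re-exported above. Guarded so it never runs on
# import.
if __name__ == "__main__":
    import healpy as hp
    nside = 16
    print("npix = %d" % hp.nside2npix(nside))      # 12 * nside**2 pixels
    theta, phi = 1.0, 2.0                          # co-latitude and longitude in radians
    ipix = hp.ang2pix(nside, theta, phi)
    print("pixel %d -> angles %s" % (ipix, str(hp.pix2ang(nside, ipix))))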
|
gpl-2.0
|
cpcloud/vbench
|
setup.py
|
2
|
1250
|
#!/usr/bin/env python
from numpy.distutils.misc_util import Configuration
from numpy.distutils.core import setup
DESCRIPTION = "Performance benchmarking and monitoring tool"
LONG_DESCRIPTION = """
Performance benchmarking and monitoring tool
"""
REQUIRES = ['sqlalchemy', 'pandas']
DISTNAME = 'vbench'
LICENSE = 'BSD'
AUTHOR = "Wes McKinney"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/wesm/vbench"
CLASSIFIERS = [
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
]
MAJOR = 0
MINOR = 1
ISRELEASED = False
VERSION = '%d.%d' % (MAJOR, MINOR)
FULLVERSION = VERSION
if not ISRELEASED:
FULLVERSION += '.beta'
if __name__ == '__main__':
setup(name=DISTNAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
packages=['vbench', 'vbench.tests'],
package_data={'vbench' : ['scripts/*.py']},
description=DESCRIPTION,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any')
|
mit
|
sinhrks/scikit-learn
|
sklearn/decomposition/__init__.py
|
76
|
1490
|
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
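# ---------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): using one of the estimators
# listed above, PCA, as a dimensionality reduction step. Guarded so it never
# runs on import.
if __name__ == "__main__":
    import numpy as np
    from sklearn.decomposition import PCA
    X = np.random.RandomState(0).rand(10, 5)
    print(PCA(n_components=2).fit_transform(X).shape)  # (10, 2)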
|
bsd-3-clause
|
campagnola/acq4
|
acq4/pyqtgraph/exporters/Matplotlib.py
|
4
|
4817
|
from ..Qt import QtGui, QtCore
from .Exporter import Exporter
from .. import PlotItem
from .. import functions as fn
__all__ = ['MatplotlibExporter']
"""
It is helpful when using the matplotlib Exporter if your
.matplotlib/matplotlibrc file is configured appropriately.
The following are suggested for getting usable PDF output that
can be edited in Illustrator, etc.
backend : Qt4Agg
text.usetex : True # Assumes you have a findable LaTeX installation
interactive : False
font.family : sans-serif
font.sans-serif : 'Arial' # (make first in list)
mathtext.default : sf
figure.facecolor : white # personal preference
# next setting allows pdf font to be readable in Adobe Illustrator
pdf.fonttype : 42 # set fonts to TrueType (otherwise it will be 3
                  # and the text will be vectorized)
text.dvipnghack : True # primarily to clean up font appearance on Mac
The advantage is that there is less to do to get an exported file cleaned and ready for
publication. Fonts are not vectorized (outlined), and window colors are white.
"""
class MatplotlibExporter(Exporter):
Name = "Matplotlib Window"
windows = []
def __init__(self, item):
Exporter.__init__(self, item)
def parameters(self):
return None
def cleanAxes(self, axl):
if type(axl) is not list:
axl = [axl]
for ax in axl:
if ax is None:
continue
for loc, spine in ax.spines.items():
if loc in ['left', 'bottom']:
pass
elif loc in ['right', 'top']:
spine.set_color('none')
# do not draw the spine
else:
raise ValueError('Unknown spine location: %s' % loc)
# turn off ticks when there is no spine
ax.xaxis.set_ticks_position('bottom')
def export(self, fileName=None):
if isinstance(self.item, PlotItem):
mpw = MatplotlibWindow()
MatplotlibExporter.windows.append(mpw)
stdFont = 'Arial'
fig = mpw.getFigure()
# get labels from the graphic item
xlabel = self.item.axes['bottom']['item'].label.toPlainText()
ylabel = self.item.axes['left']['item'].label.toPlainText()
title = self.item.titleLabel.text
ax = fig.add_subplot(111, title=title)
ax.clear()
self.cleanAxes(ax)
#ax.grid(True)
for item in self.item.curves:
x, y = item.getData()
opts = item.opts
pen = fn.mkPen(opts['pen'])
if pen.style() == QtCore.Qt.NoPen:
linestyle = ''
else:
linestyle = '-'
color = tuple([c/255. for c in fn.colorTuple(pen.color())])
symbol = opts['symbol']
if symbol == 't':
symbol = '^'
symbolPen = fn.mkPen(opts['symbolPen'])
symbolBrush = fn.mkBrush(opts['symbolBrush'])
markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())])
markerfacecolor = tuple([c/255. for c in fn.colorTuple(symbolBrush.color())])
markersize = opts['symbolSize']
if opts['fillLevel'] is not None and opts['fillBrush'] is not None:
fillBrush = fn.mkBrush(opts['fillBrush'])
fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())])
ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor)
pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(),
linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor,
markersize=markersize)
xr, yr = self.item.viewRange()
ax.set_xbound(*xr)
ax.set_ybound(*yr)
ax.set_xlabel(xlabel) # place the labels.
ax.set_ylabel(ylabel)
mpw.draw()
else:
raise Exception("Matplotlib export currently only works with plot items")
MatplotlibExporter.register()
class MatplotlibWindow(QtGui.QMainWindow):
def __init__(self):
from ..widgets import MatplotlibWidget
QtGui.QMainWindow.__init__(self)
self.mpl = MatplotlibWidget.MatplotlibWidget()
self.setCentralWidget(self.mpl)
self.show()
def __getattr__(self, attr):
return getattr(self.mpl, attr)
def closeEvent(self, ev):
MatplotlibExporter.windows.remove(self)
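# ---------------------------------------------------------------------------
# A hedged, illustrative alternative (not part of pyqtgraph) to editing
# .matplotlib/matplotlibrc: the helper below returns a dict of the settings
# suggested in the module docstring near the top of this file, suitable for
# ``matplotlib.rcParams.update()``.
def _suggested_rcparams():
    return {
        'font.family': 'sans-serif',
        'mathtext.default': 'sf',
        'figure.facecolor': 'white',
        'pdf.fonttype': 42,  # TrueType fonts keep exported PDF text editable
    }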
|
mit
|
wasade/qiime
|
scripts/make_bootstrapped_tree.py
|
1
|
2269
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = [
"Justin Kuczynski",
"Jesse Stombaugh",
"Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
import matplotlib
from qiime.util import make_option
from qiime.parse import parse_newick
from qiime.util import parse_command_line_parameters
from qiime.make_bootstrapped_tree import write_pdf_bootstrap_tree
from qiime.parse import parse_bootstrap_support
script_info = {}
script_info['brief_description'] = """Make bootstrapped tree"""
script_info['script_description'] = """This script takes a tree and bootstrap\
support file and creates a pdf, colored by bootstrap support."""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""",
"""In this example, the user supplies a tree file and a text file\
containing the jackknife support information, which results in a pdf file:""",
"""%prog -m master_tree.tre -s jackknife_support.txt -o jackknife_samples.pdf"""))
script_info[
'output_description'] = """The result of this script is a pdf file."""
script_info['required_options'] = [
make_option('-m', '--master_tree', type='existing_filepath',
help='This is the path to the master tree'),
make_option('-s', '--support', type='existing_filepath',
help='This is the path to the bootstrap support file'),
make_option('-o', '--output_file', type='new_filepath',
help="This is the filename where the output should be written.")
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
tree = parse_newick(open(opts.master_tree, 'U'))
support_file = open(opts.support)
bootstraps = parse_bootstrap_support(support_file)
support_file.close()
write_pdf_bootstrap_tree(tree, opts.output_file, bootstraps)
if __name__ == "__main__":
main()
|
gpl-2.0
|
raghavrv/scikit-learn
|
examples/linear_model/plot_logistic_multinomial.py
|
81
|
2525
|
"""
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired,
edgecolor='black', s=20)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
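# Each one-vs-rest hyperplane satisfies coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0;
# plot_hyperplane solves this for x1 so the class-c boundary can be drawn as a dashed line.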
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
|
bsd-3-clause
|
jablonskim/jupyweave
|
tests/test_settings_snippet_pattern.py
|
1
|
35350
|
from copy import deepcopy
from unittest import TestCase
from settings.group_names import GroupName
from jupyweave.exceptions import InvalidBoolValueError, TimeoutValueError
from jupyweave.exceptions import InvalidConfigurationError
from jupyweave.settings.snippet import Snippet
class TestSnippetPattern(TestCase):
EXAMPLE2 = """<!DOCTYPE html>
<default_settings lang="Python 3" />
<html>
<body>
<p>This is a paragraph.</p>
<snippet output="T" timeout="20000" echo="True" output_type="All" processor="highlight" lines="6:11,13">
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
def f(x):
return x**2
x = np.linspace(0, 3*np.pi, 500)
plt.plot(x, np.sin(f(x)))
plt.title('A simple chirp')
print('before')
plt.show()
print('after')
</snippet>
<p>This is a paragraph.</p>
<p>This is a paragraph.</p>
<p>This is a paragraph.</p>
<p>This is a paragraph.</p>
<div></div>
<snippet lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1">
for i in range(5):
print(i)
j = 100
j
</snippet>
<p>This is a paragraph.</p>
<p><output
id="test_id1" /></p>
<p>This is a paragraph.</p>
<p>This is a paragraph.</p>
<b>ąłńżźćęóążź</b>
<snippet lang="SQL" lines="!1">
!sqlite:///:memory:;
CREATE TABLE test (id, aa, bb);
INSERT INTO test VALUES (1, 'test1', 'test2');
INSERT INTO test VALUES (2, 'test2', 'test2');
SELECT * FROM test;
SELECT 5;
DELETE FROM test WHERE id = 2;
SELECT * FROM test;
</snippet>
</body>
</html>"""
EXAMPLE1 = """Markdown example
================
<!!![Python 3]>
Testtesttest
------------
1. Test test
Te*s*t
<#[Python 3] echo=T output=T id=Abcde5>
for i in range(10):
print(i)
print('Test')
<@>
T**es**t
> qqq
> qqq
<$id=Abcde5>
Link [example](https://pl.wikipedia.org/wiki/Markdown)
Test `Test` Test
"""
EXAMPLE3 = """\documentclass{article}
\title{Test}
\date{07-05-2016}
\author{MJ}
\default_settings{lang[Python 3]}
\begin{document}
\maketitle
\pagenumbering{gobble}
\newpage
\pagenumbering{arabic}
\section{Test 1}
Test, test.
\subsection{Test test}
Test doc\\
\begin_code{echo[T], output[T], id[Test]}
for i in range(10):
print(i)
print('Test')
\end_code
Test, test, test\\
\snippet_output{id[Test]}
End
\end{document}"""
DATA1 = {
"begin": "<#{S}>",
"end": "<@>",
"output": "<${S}>",
"default_settings": "<!!!{S}>",
"settings": {
"language": "[{L}]",
"echo": "echo={E}",
"output": "output={O}",
"context": "context={C}",
"snippet_id": "id={I}",
"timeout": "timeout={T}",
"error": "allow_error={R}",
"output_type": "output_type={OT}",
"processor": "processor={P}",
"echo_lines": "lines={EL}",
"image_name": "img_name={IN}",
"font_size": "font_size={FS}",
"image_width": "img_w={IW}",
"image_height": "img_h={IH}",
"image_align": "img_align={IA}"
},
"patterns": {
"settings": "{S}",
"language": "{L}",
"echo": "{E}",
"output": "{O}",
"context": "{C}",
"snippet_id": "{I}",
"timeout": "{T}",
"error": "{R}",
"output_type": "{OT}",
"processor": "{P}",
"echo_lines": "{EL}",
"image_name": "{IN}",
"font_size": "{FS}",
"image_width": "{IW}",
"image_height": "{IH}",
"image_align": "{IA}"
}
}
DATA2 = {
"begin": "<snippet{S}>",
"end": "</snippet>",
"output": "<output{S}/>",
"default_settings": "<default_settings{S}/>",
"settings": {
"language": "lang=\"{L}\"",
"echo": "echo=\"{E}\"",
"output": "output=\"{O}\"",
"context": "context=\"{C}\"",
"snippet_id": "id=\"{I}\"",
"timeout": "timeout=\"{T}\"",
"error": "allow_error=\"{R}\"",
"output_type": "output_type=\"{OT}\"",
"processor": "processor=\"{P}\"",
"echo_lines": "lines=\"{EL}\"",
"image_name": "img_name=\"{IN}\"",
"font_size": "font_size=\"{FS}\"",
"image_width": "img_w=\"{IW}\"",
"image_height": "img_h=\"{IH}\"",
"image_align": "img_align=\"{IA}\""
},
"patterns": {
"settings": "{S}",
"language": "{L}",
"echo": "{E}",
"output": "{O}",
"context": "{C}",
"snippet_id": "{I}",
"timeout": "{T}",
"error": "{R}",
"output_type": "{OT}",
"processor": "{P}",
"echo_lines": "{EL}",
"image_name": "{IN}",
"font_size": "{FS}",
"image_width": "{IW}",
"image_height": "{IH}",
"image_align": "{IA}"
}
}
DATA3 = {
"begin": "\begin_code{@S}",
"end": "\end_code",
"output": "\snippet_output{@S}",
"default_settings": "\default_settings{@S}",
"settings": {
"language": "lang[@L]",
"echo": "echo[@E]",
"output": "output[@O]",
"context": "context[@C]",
"snippet_id": "id[@I]",
"timeout": "timeout[@T]",
"error": "allow_error[@R]",
"output_type": "output_type[@OT]",
"processor": "processor[@P]",
"echo_lines": "lines[@EL]",
"image_name": "img_name=[@IN]",
"font_size": "font_size=[@FS]",
"image_width": "img_w=[@IW]",
"image_height": "img_h=[@IH]",
"image_align": "img_align=[@IA]"
},
"patterns": {
"settings": "@S",
"language": "@L",
"echo": "@E",
"output": "@O",
"context": "@C",
"snippet_id": "@I",
"timeout": "@T",
"error": "@R",
"output_type": "@OT",
"processor": "@P",
"echo_lines": "@EL",
"image_name": "@IN",
"font_size": "@FS",
"image_width": "@IW",
"image_height": "@IH",
"image_align": "@IA"
}
}
def test_invalid_fields(self):
invalid_data = deepcopy(TestSnippetPattern.DATA1)
invalid_data['invalid'] = 'invalid'
with self.assertRaises(InvalidConfigurationError):
Snippet(invalid_data)
invalid_data = deepcopy(TestSnippetPattern.DATA1)
invalid_data['settings']['invalid'] = 'invalid'
with self.assertRaises(InvalidConfigurationError):
Snippet(invalid_data)
invalid_data = deepcopy(TestSnippetPattern.DATA1)
invalid_data['patterns']['invalid'] = 'invalid'
with self.assertRaises(InvalidConfigurationError):
Snippet(invalid_data)
def test_entry1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
entry = snippet.pattern().entry()
entries = [x for x in entry.finditer(TestSnippetPattern.EXAMPLE1)]
self.assertEqual(2, len(entries))
entry = entries[0]
self.assertNotEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.CODE))
code = '''
for i in range(10):
print(i)
print('Test')
'''
self.assertEqual(code, entry.group(GroupName.CODE))
settings = '''[Python 3] echo=T output=T id=Abcde5'''
self.assertEqual(settings, entry.group(GroupName.CODE_SETTINGS))
entry = entries[1]
self.assertEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertEqual(None, entry.group(GroupName.CODE))
settings = '''id=Abcde5'''
self.assertEqual(settings, entry.group(GroupName.OUTPUT_SETTINGS))
def test_entry2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
entry = snippet.pattern().entry()
entries = [x for x in entry.finditer(TestSnippetPattern.EXAMPLE2)]
self.assertEqual(4, len(entries))
entry = entries[0]
self.assertNotEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.CODE))
code = """
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
def f(x):
return x**2
x = np.linspace(0, 3*np.pi, 500)
plt.plot(x, np.sin(f(x)))
plt.title('A simple chirp')
print('before')
plt.show()
print('after')
"""
self.assertEqual(code, entry.group(GroupName.CODE))
settings = ''' output="T" timeout="20000" echo="True" output_type="All" processor="highlight" lines="6:11,13"'''
self.assertEqual(settings, entry.group(GroupName.CODE_SETTINGS))
entry = entries[1]
self.assertNotEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.CODE))
code = """
for i in range(5):
print(i)
j = 100
j
"""
self.assertEqual(code, entry.group(GroupName.CODE))
settings = ''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''
self.assertEqual(settings, entry.group(GroupName.CODE_SETTINGS))
entry = entries[2]
self.assertEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertEqual(None, entry.group(GroupName.CODE))
settings = '''
id="test_id1" '''
self.assertEqual(settings, entry.group(GroupName.OUTPUT_SETTINGS))
entry = entries[3]
self.assertNotEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.CODE))
code = """
!sqlite:///:memory:;
CREATE TABLE test (id, aa, bb);
INSERT INTO test VALUES (1, 'test1', 'test2');
INSERT INTO test VALUES (2, 'test2', 'test2');
SELECT * FROM test;
SELECT 5;
DELETE FROM test WHERE id = 2;
SELECT * FROM test;
"""
self.assertEqual(code, entry.group(GroupName.CODE))
settings = ''' lang="SQL" lines="!1"'''
self.assertEqual(settings, entry.group(GroupName.CODE_SETTINGS))
def test_entry3(self):
snippet = Snippet(TestSnippetPattern.DATA3)
entry = snippet.pattern().entry()
entries = [x for x in entry.finditer(TestSnippetPattern.EXAMPLE3)]
self.assertEqual(2, len(entries))
entry = entries[0]
self.assertNotEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.CODE))
code = '''
for i in range(10):
print(i)
print('Test')
'''
self.assertEqual(code, entry.group(GroupName.CODE))
settings = '''echo[T], output[T], id[Test]'''
self.assertEqual(settings, entry.group(GroupName.CODE_SETTINGS))
entry = entries[1]
self.assertEqual(None, entry.group(GroupName.CODE_SNIPPET))
self.assertNotEqual(None, entry.group(GroupName.OUTPUT_SNIPPET))
self.assertEqual(None, entry.group(GroupName.CODE))
settings = '''id[Test]'''
self.assertEqual(settings, entry.group(GroupName.OUTPUT_SETTINGS))
def test_default_settings1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
entry = snippet.pattern().default_settings()
entries = [x for x in entry.finditer(TestSnippetPattern.EXAMPLE1)]
self.assertEqual(1, len(entries))
entry = entries[0]
self.assertNotEqual(None, entry.group(GroupName.DEFAULT_SETTINGS_SNIPPET))
settings = '''[Python 3]'''
self.assertEqual(settings, entry.group(GroupName.DEFAULT_SETTINGS))
def test_default_settings2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
entry = snippet.pattern().default_settings()
entries = [x for x in entry.finditer(TestSnippetPattern.EXAMPLE2)]
self.assertEqual(1, len(entries))
entry = entries[0]
self.assertNotEqual(None, entry.group(GroupName.DEFAULT_SETTINGS_SNIPPET))
settings = ''' lang="Python 3" '''
self.assertEqual(settings, entry.group(GroupName.DEFAULT_SETTINGS))
def test_language1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual('Python 3', snippet.pattern().language(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().language(''' ]Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual('Python 3', snippet.pattern().language('''[Python 3]output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().language(''' output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
def test_language2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual('Python 3', snippet.pattern().language(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().language(''' aalang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().language(''' langaa="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().language(''' output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
def test_echo1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual(None, snippet.pattern().echo(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertTrue(snippet.pattern().echo(''' [Python 3] output=F context=TestCtx2 echo=T allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().echo(''' [Python 3] output=F aecho=1 echoqq=False context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertTrue(snippet.pattern().echo('echo=T'))
self.assertTrue(snippet.pattern().echo('echo=1'))
self.assertTrue(snippet.pattern().echo('echo=True'))
self.assertTrue(snippet.pattern().echo('echo=Y'))
self.assertTrue(snippet.pattern().echo('echo=t'))
self.assertTrue(snippet.pattern().echo('echo=y'))
self.assertTrue(snippet.pattern().echo('echo=YES'))
self.assertTrue(snippet.pattern().echo('echo=yes'))
self.assertFalse(snippet.pattern().echo('echo=F'))
self.assertFalse(snippet.pattern().echo('echo=False'))
self.assertFalse(snippet.pattern().echo('echo=0'))
self.assertFalse(snippet.pattern().echo('echo=N'))
self.assertFalse(snippet.pattern().echo(' echo=No'))
self.assertFalse(snippet.pattern().echo('echo=no '))
with self.assertRaises(InvalidBoolValueError):
snippet.pattern().echo('echo=X')
def test_echo2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual(None, snippet.pattern().echo(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertTrue(snippet.pattern().echo(''' lang="Python 3" output="F" context="TestCtx2" echo="T" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().echo(''' lang="Python 3" output="F" aecho="1" echoqq="False" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertTrue(snippet.pattern().echo('echo="T"'))
self.assertTrue(snippet.pattern().echo('echo="1"'))
self.assertTrue(snippet.pattern().echo('echo="True"'))
self.assertTrue(snippet.pattern().echo('echo="Y"'))
self.assertTrue(snippet.pattern().echo('echo="t"'))
self.assertTrue(snippet.pattern().echo('echo="y"'))
self.assertTrue(snippet.pattern().echo('echo="YES"'))
self.assertTrue(snippet.pattern().echo('echo="yes"'))
self.assertFalse(snippet.pattern().echo('echo="F"'))
self.assertFalse(snippet.pattern().echo('echo="False"'))
self.assertFalse(snippet.pattern().echo('echo="0"'))
self.assertFalse(snippet.pattern().echo('echo="N"'))
self.assertFalse(snippet.pattern().echo(' echo="No"'))
self.assertFalse(snippet.pattern().echo('echo="no" '))
with self.assertRaises(InvalidBoolValueError):
snippet.pattern().echo('echo="X"')
def test_output1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual(None, snippet.pattern().output(''' [Python 3] context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertFalse(snippet.pattern().output(''' [Python 3]output=F context=TestCtx2 echo=T allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().output(''' [Python 3] qqoutput=F outputww=0 context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertTrue(snippet.pattern().output('output=T'))
self.assertTrue(snippet.pattern().output('output=1'))
self.assertTrue(snippet.pattern().output('output=True'))
self.assertTrue(snippet.pattern().output('output=Y'))
self.assertTrue(snippet.pattern().output('output=t'))
self.assertTrue(snippet.pattern().output('output=y'))
self.assertTrue(snippet.pattern().output('output=YES'))
self.assertTrue(snippet.pattern().output('output=yes'))
self.assertFalse(snippet.pattern().output('output=F'))
self.assertFalse(snippet.pattern().output('output=False '))
self.assertFalse(snippet.pattern().output('output=0'))
self.assertFalse(snippet.pattern().output('output=N'))
self.assertFalse(snippet.pattern().output(' output=No'))
self.assertFalse(snippet.pattern().output('output=no '))
with self.assertRaises(InvalidBoolValueError):
snippet.pattern().output('output="X"')
def test_output2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual(None, snippet.pattern().output(''' lang="Python 3" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertFalse(snippet.pattern().output(''' lang="Python 3"output="F" context="TestCtx2" echo="T" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().output(''' lang="Python 3" qqoutput="F" outputww="0" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertTrue(snippet.pattern().output('output="T"'))
self.assertTrue(snippet.pattern().output('output="1"'))
self.assertTrue(snippet.pattern().output('output="True"'))
self.assertTrue(snippet.pattern().output('output="Y"'))
self.assertTrue(snippet.pattern().output('output="t"'))
self.assertTrue(snippet.pattern().output('output="y"'))
self.assertTrue(snippet.pattern().output('output="YES"'))
self.assertTrue(snippet.pattern().output('output="yes"'))
self.assertFalse(snippet.pattern().output('output="F"'))
self.assertFalse(snippet.pattern().output('output=" False "'))
self.assertFalse(snippet.pattern().output('output="0"'))
self.assertFalse(snippet.pattern().output('output="N"'))
self.assertFalse(snippet.pattern().output(' output="No"'))
self.assertFalse(snippet.pattern().output('output="no" '))
with self.assertRaises(InvalidBoolValueError):
snippet.pattern().output('output="X"')
def test_context1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual('TestCtx2', snippet.pattern().context(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().context(''' [Python 3] output=F qqcontext=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().context(''' [Python 3] output=F contextqq=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().context(''' output=F allow_error=F processor=highlight
id=test_id1'''))
def test_context2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual('TestCtx2', snippet.pattern().context(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().context(''' lang="Python 3" output="F" qqcontext="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().context(''' lang="Python 3" output="F" contextqq="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().context(''' output="F" allow_error="F" processor="highlight"
id="test_id1"'''))
def test_id1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual('test_id1', snippet.pattern().id(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().id(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
aid=test_id1'''))
self.assertEqual(None, snippet.pattern().id(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
idw=test_id1'''))
self.assertEqual(None, snippet.pattern().id(''' output=F context=TestCtx2 allow_error=F processor=highlight '''))
def test_id2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual('test_id1', snippet.pattern().id(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().id(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
aid="test_id1"'''))
self.assertEqual(None, snippet.pattern().id(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
idw="test_id1"'''))
self.assertEqual(None, snippet.pattern().id(''' output="F" context="TestCtx2" allow_error="F" processor="highlight" '''))
def test_timeout1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual(2.5, snippet.pattern().timeout(''' [Python 3] output=F context=TestCtx2 timeout=2500 allow_error=F processor=highlight
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().timeout(''' [Python 3] aatimeout=2500 output=F context=TestCtx2 allow_error="F" processor=highlight
aid="test_id1"'''))
self.assertEqual(None, snippet.pattern().timeout(''' [Python 3] timeoutaa=2500 output=F context=TestCtx2 allow_error=F processor=highlight
idw="test_id1"'''))
self.assertEqual(None, snippet.pattern().timeout(''' output=F context=TestCtx2 allow_error=F processor=highlight '''))
with self.assertRaises(TimeoutValueError):
snippet.pattern().timeout(''' [Python 3] timeout=invalid output=F context=TestCtx2 allow_error=F processor=highlight
idw=test_id1''')
def test_timeout2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual(2.5, snippet.pattern().timeout(''' lang="Python 3" output="F" context="TestCtx2" timeout="2500" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().timeout(''' lang="Python 3" aatimeout="2500" output="F" context="TestCtx2" allow_error="F" processor="highlight"
aid="test_id1"'''))
self.assertEqual(None, snippet.pattern().timeout(''' lang="Python 3" timeoutaa="2500" output="F" context="TestCtx2" allow_error="F" processor="highlight"
idw="test_id1"'''))
self.assertEqual(None, snippet.pattern().timeout(''' output="F" context="TestCtx2" allow_error="F" processor="highlight" '''))
with self.assertRaises(TimeoutValueError):
snippet.pattern().timeout(''' lang="Python 3" timeout="invalid" output="F" context="TestCtx2" allow_error="F" processor="highlight"
idw="test_id1"''')
def test_error1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual(None, snippet.pattern().error(''' [Python 3] output=F context=TestCtx2 processor=highlight
id=test_id1'''))
self.assertTrue(snippet.pattern().error(''' [Python 3] output=F context=TestCtx2 echo=T allow_error=1 processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().error(''' [Python 3] output=F aecho=1 echoqq=False context=TestCtx2 aallow_error=F allow_errorq=1 processor=highlight
id=test_id1'''))
self.assertTrue(snippet.pattern().error('allow_error=T'))
self.assertTrue(snippet.pattern().error('allow_error=1'))
self.assertTrue(snippet.pattern().error('allow_error=True'))
self.assertTrue(snippet.pattern().error('allow_error=Y'))
self.assertTrue(snippet.pattern().error('allow_error=t'))
self.assertTrue(snippet.pattern().error('allow_error=y'))
self.assertTrue(snippet.pattern().error('allow_error=YES'))
self.assertTrue(snippet.pattern().error('allow_error=yes'))
self.assertFalse(snippet.pattern().error('allow_error=F'))
self.assertFalse(snippet.pattern().error('allow_error=False'))
self.assertFalse(snippet.pattern().error('allow_error=0'))
self.assertFalse(snippet.pattern().error('allow_error=N'))
self.assertFalse(snippet.pattern().error(' allow_error=No'))
self.assertFalse(snippet.pattern().error('allow_error=no '))
with self.assertRaises(InvalidBoolValueError):
snippet.pattern().error('allow_error=X')
def test_error2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual(None, snippet.pattern().error(''' lang="Python 3" output="F" context="TestCtx2" processor="highlight"
id="test_id1"'''))
self.assertTrue(snippet.pattern().error(''' lang="Python 3" output="F" context="TestCtx2" echo="T" allow_error="1" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().error(''' lang="Python 3" output="F" aecho="1" echoqq="False" context="TestCtx2" aallow_error="F" allow_errorq="1" processor="highlight"
id="test_id1"'''))
self.assertTrue(snippet.pattern().error('allow_error="T"'))
self.assertTrue(snippet.pattern().error('allow_error="1"'))
self.assertTrue(snippet.pattern().error('allow_error="True"'))
self.assertTrue(snippet.pattern().error('allow_error="Y"'))
self.assertTrue(snippet.pattern().error('allow_error="t"'))
self.assertTrue(snippet.pattern().error('allow_error="y"'))
self.assertTrue(snippet.pattern().error('allow_error="YES"'))
self.assertTrue(snippet.pattern().error('allow_error="yes"'))
self.assertFalse(snippet.pattern().error('allow_error="F"'))
self.assertFalse(snippet.pattern().error('allow_error="False"'))
self.assertFalse(snippet.pattern().error('allow_error="0"'))
self.assertFalse(snippet.pattern().error('allow_error="N"'))
self.assertFalse(snippet.pattern().error(' allow_error="No"'))
self.assertFalse(snippet.pattern().error('allow_error="no" '))
with self.assertRaises(InvalidBoolValueError):
snippet.pattern().error('allow_error="X"')
def test_output_type1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertTrue(snippet.pattern().output_type(''' [Python 3] output=F context=TestCtx2 allow_error=F output_type=Stdout,Image processor=highlight
id=test_id1''').is_enabled('Stdout'))
self.assertTrue(snippet.pattern().output_type(''' [Python 3] output=F context=TestCtx2 allow_error=F output_type=Stdout,Image processor=highlight
id=test_id1''').is_enabled('Image'))
self.assertFalse(snippet.pattern().output_type(''' [Python 3] output=F context=TestCtx2 allow_error=F output_type=Stdout,Image processor=highlight
id=test_id1''').is_enabled('Stderr'))
self.assertFalse(snippet.pattern().output_type(''' [Python 3] output=F context=TestCtx2 allow_error=F output_type=Stdout,Image processor=highlight
id=test_id1''').is_enabled('Text'))
self.assertTrue(snippet.pattern().output_type(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1''').is_enabled('Stdout'))
self.assertTrue(snippet.pattern().output_type(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1''').is_enabled('Image'))
def test_output_type2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertTrue(snippet.pattern().output_type(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" output_type="Stdout, Image" processor="highlight"
id="test_id1"''').is_enabled('Stdout'))
self.assertTrue(snippet.pattern().output_type(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" output_type="Stdout, Image" processor="highlight"
id="test_id1"''').is_enabled('Image'))
self.assertFalse(snippet.pattern().output_type(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" output_type="Stdout, Image" processor="highlight"
id="test_id1"''').is_enabled('Stderr'))
self.assertFalse(snippet.pattern().output_type(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" output_type="Stdout, Image" processor="highlight"
id="test_id1"''').is_enabled('Text'))
self.assertTrue(snippet.pattern().output_type(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"''').is_enabled('Stdout'))
self.assertTrue(snippet.pattern().output_type(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"''').is_enabled('Image'))
def test_processor1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
self.assertEqual('highlight', snippet.pattern().processor(''' [Python 3] output=F context=TestCtx2 allow_error=F processor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().processor(''' [Python 3] output=F context=TestCtx2 allow_error=F qddprocessor=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().processor('''[Python 3] output=F context=TestCtx2 allow_error=F processorqq=highlight
id=test_id1'''))
self.assertEqual(None, snippet.pattern().processor(''' output=F context=TestCtx2 allow_error=F
id=test_id1'''))
def test_processor2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
self.assertEqual('highlight', snippet.pattern().processor(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().processor(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" qddprocessor="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().processor(''' lang="Python 3" output="F" context="TestCtx2" allow_error="F" processorqq="highlight"
id="test_id1"'''))
self.assertEqual(None, snippet.pattern().processor(''' output="F" context="TestCtx2" allow_error="F"
id="test_id1"'''))
def test_echo_lines1(self):
snippet = Snippet(TestSnippetPattern.DATA1)
lines = snippet.pattern().echo_lines('echo_lines=5,6,10')
self.assertEqual(2, len(lines))
self.assertFalse(lines[0])
self.assertEqual(3, len(lines[1]))
self.assertEqual(6, lines[1][1])
lines = snippet.pattern().echo_lines('echo_lines=1,5-15,23')
self.assertEqual(2, len(lines))
self.assertFalse(lines[0])
self.assertEqual(13, len(lines[1]))
self.assertEqual(7, lines[1][3])
lines = snippet.pattern().echo_lines('echo_lines=!1,5-15,23')
self.assertEqual(2, len(lines))
self.assertTrue(lines[0])
self.assertEqual(13, len(lines[1]))
self.assertEqual(7, lines[1][3])
with self.assertRaises(ValueError):
lines = snippet.pattern().echo_lines('echo_lines=!1,5-15,23aqqw')
def test_echo_lines2(self):
snippet = Snippet(TestSnippetPattern.DATA2)
lines = snippet.pattern().echo_lines('echo_lines="5,6, 10"')
self.assertEqual(2, len(lines))
self.assertFalse(lines[0])
self.assertEqual(3, len(lines[1]))
self.assertEqual(6, lines[1][1])
lines = snippet.pattern().echo_lines('echo_lines="1, 5-15, 23"')
self.assertEqual(2, len(lines))
self.assertFalse(lines[0])
self.assertEqual(13, len(lines[1]))
self.assertEqual(7, lines[1][3])
lines = snippet.pattern().echo_lines('echo_lines="! 1, 5 - 15, 23"')
self.assertEqual(2, len(lines))
self.assertTrue(lines[0])
self.assertEqual(13, len(lines[1]))
self.assertEqual(7, lines[1][3])
with self.assertRaises(ValueError):
lines = snippet.pattern().echo_lines('echo_lines="! 1, 5 - 15, 23 aqqw"')
|
mit
|
iamGavinZhou/12306_captcha
|
res_recog_v2.py
|
1
|
7962
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import scipy.misc as misc
import numpy as np
import pandas as pd
from pandas import DataFrame
from caffe_util import *
from util import get_success_rate
"""思路:
(1) 识别12306验证码的中文字符
(2) 搜索库,获得百度图片上抓的5张图(done)
(3) 切割12306验证码,获得小图(8张)(done)
(4) 获得每张小图的向量表示(ResNet)(done)
(5) 获得库中5张图的向量表示
(6) 获得5张图分别与8张小图哪个最近且同时满足相似度大于0.75,形成可能的答案集A
(7) 对A中的每张图分别与除自身以外的图(向量)进行相似度的比较,选定阈值筛选得到最终的答案集B
出现的问题:
(1) 答案集A中满足0.75的约束导致丢失
(2) 某些认错导致认多
"""
TEST_DIR = 'data/test_data/'
TEMP_DIR = 'data/craw_recog_data/temp'
CRAW_DB_PATH = 'data/DB/'
LABEL_FILE = 'data/label.txt'
DB_FILE = 'data/DB.csv'
TOP_K = 7
LAYERS = 101
THRES = 0.60
if not os.path.exists(TEMP_DIR):
os.makedirs(TEMP_DIR)
print 'start network initialization.....'
start_ini = time.clock()
net = init_net(layers=LAYERS, using_gpu=True)
print 'done!!!, using time: %.4f s' % (time.clock()-start_ini)
def pearson_relative(vec1, vec2):
"""pearson相关系数
[-1,1], 绝对值越大越相关
"""
assert vec1.shape==vec2.shape
cov = np.mean(vec1*vec2)-(np.mean(vec1)*np.mean(vec2))
std = np.std(vec1)*np.std(vec2)
return cov/std
def cos_relative(vec1, vec2):
"""计算余弦相似度
[-1,1],越接近1表示方向越接近,越接近-1表示方向越相反
"""
assert vec1.shape==vec2.shape
dot = np.sum(vec1*vec2)
sqt = np.sqrt(np.sum(vec1**2))*np.sqrt(np.sum(vec2**2))
return dot/sqt
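# Quick illustration: parallel vectors give 1.0, orthogonal vectors give 0.0, e.g.
#     cos_relative(np.array([1., 0.]), np.array([2., 0.]))   # -> 1.0
#     cos_relative(np.array([1., 0.]), np.array([0., 3.]))   # -> 0.0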
def recog_label():
"""识别测试集下所有12306验证码的中文字符"""
# label一样的文字也存在不同的扭曲变形
label_file = pd.read_table('data/test_label.txt', names=['data'])
label_list = label_file['data'].tolist()
return label_list
def split_to_small_img(img_path, index):
"""将12306验证码分割成小图"""
res = []
im = misc.imread(img_path, mode='RGB')
im_crop = im[37:183, 2:290, :]
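# im_crop drops the text-prompt strip at the top of the captcha, keeping the
# 2 x 4 grid of candidate images (each cell is 72 px wide by 73 px tall).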
for row in range(0, 2):
for col in range(0, 4):
# every small content image
row_start, row_end = row*73, row*73+72
col_start, col_end = col*72, col*72+71
content_img = im_crop[row_start:row_end+1, col_start:col_end+1, :]
# resize the small content image
# content_resize_img = misc.imresize(content_img, [200, 200])
img_save_path = os.path.join(TEMP_DIR, str(index)+'_'+str(row)+'_'+str(col)+'.jpg')
misc.imsave(img_save_path, content_img)
res.append(img_save_path)
return res
def get_ResNet_vector(img_path):
"""获得图像的ResNet的向量表达"""
img = load_image(img_path)
img_p = preprocess(img)
# feed image
net.blobs['data'].data[0] = img_p.transpose((2, 0, 1))
assert net.blobs['data'].data[0].shape == (3, 224, 224)
net.forward()
# fc layer feature
caffe_fcb = net.blobs['fc1000'].data[0] # numpy ndarray with shape(1000,)
caffe_fc = caffe_fcb.reshape(1, 1000)
return caffe_fc
def get_label_list():
"""获得所有label"""
label_file = pd.read_table(LABEL_FILE, names=['data'])
label_list = label_file['data'].tolist()
return label_list
print 'get label list...'
label_list = get_label_list()
print 'label length: %d' % len(label_list)
def get_all_DB_vec():
"""获得所有抓取图的vector表示"""
names = []
for label in label_list:
for index in range(1, TOP_K+1):
img_path_jpg = os.path.join(CRAW_DB_PATH, label+'_'+str(index)+'.jpg')
img_path_png = os.path.join(CRAW_DB_PATH, label+'_'+str(index)+'.png')
if os.path.exists(img_path_jpg):
names.append(img_path_jpg)
if os.path.exists(img_path_png):
names.append(img_path_png)
nums = len(label_list) * TOP_K
db_vecs = np.zeros((nums, 1000), dtype=np.float32)
for x in range(0, nums):
path_t = names[x]
vec_t = get_ResNet_vector(path_t)
db_vecs[x] = vec_t
return db_vecs
print 'start get all DB vectors.....'
start_db = time.clock()
if not os.path.exists(DB_FILE):
print 'no pre-computed data, start calc.....'
db_vecs = get_all_DB_vec() # feature vectors of the crawled images
df = DataFrame(db_vecs)
df.to_csv(DB_FILE, header=False, index=False)
else:
print 'using pre-computed data...'
db_vecs = pd.read_csv(DB_FILE, header=None).as_matrix()
print 'done!!!, using time: %.4f s' % (time.clock()-start_db)
def get_spec_vec(label):
"""获得DB中label所代表的vector"""
label_index = label_list.index(label)
return db_vecs[label_index*TOP_K:label_index*TOP_K+TOP_K, :]
def calc_dis(vec1, vec2):
"""计算两个向量的距离"""
assert vec1.shape == vec2.shape
return np.sqrt(np.sum((vec1-vec2) ** 2))
def recog(img_path, index):
"""识别12306验证码"""
# split
small_imgs_path = split_to_small_img(img_path, index)
assert len(small_imgs_path)==8
'''Phase A
(1) Get the vector representations of the 8 small captcha images
(2) Get the vector representations of the crawled images
(3) For each crawled image, add the most similar small image to set A (by vector distance)
(4) Calibration
'''
# 1
# get the feature vectors of the 8 small captcha images
vecs = np.zeros((8, 1000), dtype=np.float32)
for x in range(0, 8):
img_path_t = small_imgs_path[x]
vec = get_ResNet_vector(img_path_t)
vecs[x] = vec
# 2
# get the vectors of the DB images for the recognized label
label = recog_label()[index]
db_s_vec = get_spec_vec(label)
# 3
# for each crawled image, find the most similar small image
min_indexes = []
for x in range(0, TOP_K):
vec_d = db_s_vec[x]
dis_t = np.zeros((1, 8))
for y in range(0, 8):
vec_s = vecs[y]
dis = calc_dis(vec_s, vec_d)
dis_t[0, y] = dis
min_dis = np.min(dis_t)
min_index = np.argwhere(dis_t==min_dis)[0,1] # index of the small image closest to the crawled image
# the similarity must also be at least 0.75
re_t = cos_relative(vec_d, vecs[min_index])
if re_t >= 0.75:
min_indexes.append(min_index)
min_index_dict = dict() # count how often each index was selected
for index in min_indexes:
if not index in min_index_dict.keys():
min_index_dict[index] = 1
else:
min_index_dict[index] += 1
'''
For small images selected more than TOP_K/2 times, do a second lookup
to find the images most similar to them.
'''
refine_list = []
for index in min_index_dict.keys():
if min_index_dict[index] > (TOP_K//2):
refine_list.append(index)
min_index_set = set(min_indexes)
for x in refine_list:
vec_d = vecs[x]
dis_t = np.zeros((1, 8))
for y in range(0, 8):
vec_s = vecs[y]
dis = calc_dis(vec_s, vec_d)
dis_t[0, y] = dis
# take the smallest distances (two nearest)
dist_sort = np.sort(dis_t)
s2_least = dist_sort[0, 0:2]
for least in s2_least:
least_index = np.argwhere(dis_t==least)[0,1]
if least_index == x:
continue
min_index_set.add(least_index)
# 4
# calibration
fi_index = []
for index1 in min_index_set:
all_sat = True
vec1 = vecs[index1]
for index2 in min_index_set:
if (index1 == index2) or (index2 in fi_index):
continue
vec2 = vecs[index2]
simi = cos_relative(vec1, vec2)
if simi < THRES:
all_sat = False
break
if all_sat:
fi_index.append(index1)
return fi_index
def main():
"""识别12306验证码"""
fi = open('data/test_recog.txt', 'w')
print 'all preparation done!!!'
print 'start recognition procedure.....'
all_start = time.clock()
files = os.listdir(TEST_DIR)
print 'all test data: %d' % len(files)
for index in range(0, len(files)):
img_path = os.path.join(TEST_DIR, str(index)+'.jpg')
ans = recog(img_path, index)
# print 'recog: ' + img_path
# print 'ans: ' + ','.join([str(x) for x in ans])
recog_ans = ''
for x in ans:
recog_ans += str(x)
recog_ans += ' '
fi.write(str(index)+':'+recog_ans[0:-1]+'\n')
print 'All used: %.4f s' % (time.clock()-all_start)
print 'Avg time: %.4f s' % ((time.clock()-all_start)/len(files))
print 'start calc success rate...'
print 'acc: %.2f' % get_success_rate()
if __name__ == '__main__':
main()
|
mit
|
thunder-project/thunder-factorization
|
test/test_algorithms.py
|
1
|
3886
|
import pytest
from factorization import SVD, PCA, ICA, NMF
from numpy import array, linspace, sin, cos, square, absolute, c_, random, dot, allclose
from sklearn.datasets import make_low_rank_matrix
from thunder.series import fromarray
pytestmark = pytest.mark.usefixtures("eng")
def allclose_sign(a1, a2, atol=1e-8, rtol=1e-5):
"""
check if arrays are equal, up to sign flips along columns
"""
from itertools import product
if a1.shape != a2.shape:
return False
for signs in product(*a1.shape[1]*((-1, 1),)):
if allclose(signs*a1, a2, atol=atol, rtol=rtol):
return True
return False
def allclose_permute(a1, a2, atol=1e-8, rtol=1e-5):
"""
check if arrays are equal, up to reordering of columns
"""
from itertools import permutations
if a1.shape != a2.shape:
return False
for p in permutations(range(a1.shape[1])):
if allclose(a1[:,p], a2, atol=atol, rtol=rtol):
return True
return False
def allclose_sign_permute(a1, a2, atol=1e-8, rtol=1e-5):
"""
check if arrays are equal, up to reordering and sign flips of columns
"""
from itertools import permutations
if a1.shape != a2.shape:
return False
for p in permutations(range(a1.shape[1])):
if allclose_sign(a1[:,p], a2, atol=atol, rtol=rtol):
return True
return False
def test_svd(eng):
x = make_low_rank_matrix(n_samples=10, n_features=5, random_state=0)
x = fromarray(x, engine=eng)
from sklearn.utils.extmath import randomized_svd
u1, s1, v1 = randomized_svd(x.toarray(), n_components=2, random_state=0)
u2, s2, v2 = SVD(k=2, method='direct').fit(x)
assert allclose_sign(u1, u2)
assert allclose(s1, s2)
assert allclose_sign(v1.T, v2.T)
u2, s2, v2 = SVD(k=2, method='em', max_iter=100, seed=0).fit(x)
tol = 1e-1
assert allclose_sign(u1, u2, atol=tol)
assert allclose(s1, s2, atol=tol)
assert allclose_sign(v1.T, v2.T, atol=tol)
def test_pca(eng):
x = make_low_rank_matrix(n_samples=10, n_features=5, random_state=0)
x = fromarray(x, engine=eng)
from sklearn.decomposition import PCA as skPCA
pca = skPCA(n_components=2)
t1 = pca.fit_transform(x.toarray())
w1_T = pca.components_
t2, w2_T = PCA(k=2, svd_method='direct').fit(x)
assert allclose_sign(w1_T.T, w2_T.T)
assert allclose_sign(t1, t2)
t2, w2_T = PCA(k=2, svd_method='em', max_iter=100, seed=0).fit(x)
tol = 1e-1
assert allclose_sign(w1_T.T, w2_T.T, atol=tol)
assert allclose_sign(t1, t2, atol=tol)
def test_ica(eng):
t = linspace(0, 10, 100)
s1 = sin(t)
s2 = square(sin(2*t))
x = c_[s1, s2, s1+s2]
random.seed(0)
x += 0.001*random.randn(*x.shape)
x = fromarray(x, engine=eng)
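# normalize_ICA rescales each recovered source by the column sum of the mixing
# matrix (and divides that column by the same factor), so the sklearn and
# thunder decompositions can be compared up to permutation and sign.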
def normalize_ICA(s, aT):
a = aT.T
c = a.sum(axis=0)
return s*c, (a/c).T
from sklearn.decomposition import FastICA
ica = FastICA(n_components=2, fun='cube', random_state=0)
s1 = ica.fit_transform(x.toarray())
aT1 = ica.mixing_.T
s1, aT1 = normalize_ICA(s1, aT1)
s2, aT2 = ICA(k=2, svd_method='direct', max_iter=200, seed=0).fit(x)
s2, aT2 = normalize_ICA(s2, aT2)
tol=1e-1
assert allclose_sign_permute(s1, s2, atol=tol)
assert allclose_sign_permute(aT1, aT2, atol=tol)
def test_nmf(eng):
t = linspace(0, 10, 100)
s1 = 1 + absolute(sin(t))
s2 = 1 + square(cos(2*t))
h = c_[s1, s2].T
w = array([[1, 0], [0, 1], [1, 1]])
x = dot(w, h)
x = fromarray(x, engine=eng)
from sklearn.decomposition import NMF as skNMF
nmf = skNMF(n_components=2, random_state=0)
w1 = nmf.fit_transform(x.toarray())
h1 = nmf.components_
xhat1 = dot(w1, h1)
w2, h2 = NMF(k=2, seed=0).fit(x)
xhat2 = dot(w2, h2)
tol=1e-1
assert allclose(xhat1, xhat2, atol=tol)
|
mit
|
joshloyal/scikit-learn
|
sklearn/neighbors/tests/test_kd_tree.py
|
26
|
7800
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def check_neighbors(dualtree, breadth_first, k, metric, X, Y, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, X, Y, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
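# Brute-force reference for KDTree.kernel_density: evaluate the chosen kernel on
# every pairwise distance, sum over the training points, and scale by kernel_norm
# so the result is directly comparable to the tree-based estimate.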
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def check_results(kernel, h, atol, rtol, breadth_first, Y, kdt, dens_true):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first, Y, kdt, dens_true)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
|
bsd-3-clause
|
AlexRobson/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
280
|
3045
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
waltervh/BornAgain
|
Examples/python/simulation/ex06_Reflectometry/MaterialProfileWithParticles.py
|
2
|
1740
|
"""
Example for producing a profile of SLD of a multilayer with particles
and slicing.
"""
import bornagain as ba
from bornagain import deg, angstrom, nm
import numpy as np
import matplotlib.pyplot as plt
def get_sample():
"""
Defines sample and returns it
"""
# creating materials
m_ambient = ba.MaterialBySLD("Ambient", 0.0, 0.0)
m_ti = ba.MaterialBySLD("Ti", -1.9493e-06, 0.0)
m_ni = ba.MaterialBySLD("Ni", 9.4245e-06, 0.0)
m_particle = ba.MaterialBySLD("Particle", 5e-6, 0.0)
m_substrate = ba.MaterialBySLD("SiSubstrate", 2.0704e-06, 0.0)
# creating layers
ambient_layer = ba.Layer(m_ambient)
ti_layer = ba.Layer(m_ti, 30 * angstrom)
ni_layer = ba.Layer(m_ni, 70 * angstrom)
substrate_layer = ba.Layer(m_substrate)
# create roughness
roughness = ba.LayerRoughness(5 * angstrom, 0.5, 10 * angstrom)
# create particle layout
ff = ba.FormFactorCone(5 * nm, 10 * nm, 75 * deg)
particle = ba.Particle(m_particle, ff)
layout = ba.ParticleLayout()
layout.addParticle(particle)
iff = ba.InterferenceFunction2DLattice.createSquare(10 * nm)
layout.setInterferenceFunction(iff)
ambient_layer.addLayout(layout)
ambient_layer.setNumberOfSlices(20)
# creating multilayer
multi_layer = ba.MultiLayer()
multi_layer.addLayer(ambient_layer)
for i in range(2):
multi_layer.addLayerWithTopRoughness(ti_layer, roughness)
multi_layer.addLayerWithTopRoughness(ni_layer, roughness)
multi_layer.addLayer(substrate_layer)
return multi_layer
if __name__ == '__main__':
sample = get_sample()
zpoints, slds = ba.MaterialProfile(sample)
plt.figure()
plt.plot(zpoints, np.real(slds))
plt.show()
|
gpl-3.0
|
gewaltig/cython-neuron
|
topology/examples/test_3d_gauss.py
|
2
|
2636
|
#! /usr/bin/env python
#
# test_3d_gauss.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 10000 (x,y,z) positions
pos = [[random.uniform(-0.5,0.5), random.uniform(-0.5,0.5), random.uniform(-0.5,0.5)]
for j in range(10000)]
l1 = topo.CreateLayer({'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_neuron'})
# visualize
#xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
#xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(nest.GetChildren(l1)[0]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75,-0.75,-0.75], 'upper_right': [0.75,0.75,0.75]}},
'kernel':{'gaussian': {'p_center': 1., 'sigma': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr=topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr,l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr],[yctr],[zctr],s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt,ytgt,ztgt,s=40, facecolor='g', edgecolor='g')
tgts=topo.GetTargetNodes(ctr,l1)[0]
d=topo.Distance(ctr,tgts)
plt.figure()
plt.hist(d,100)
#plt.show()
|
gpl-2.0
|
mediagit2016/workcamp-maschinelles-lernen-grundlagen
|
17-12-11-workcamp-ml/mglearn/plot_scaling.py
|
4
|
1505
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import (StandardScaler, MinMaxScaler, Normalizer,
RobustScaler)
from .plot_helpers import cm2
def plot_scaling():
X, y = make_blobs(n_samples=50, centers=2, random_state=4, cluster_std=1)
X += 3
plt.figure(figsize=(15, 8))
main_ax = plt.subplot2grid((2, 4), (0, 0), rowspan=2, colspan=2)
main_ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm2, s=60)
maxx = np.abs(X[:, 0]).max()
maxy = np.abs(X[:, 1]).max()
main_ax.set_xlim(-maxx + 1, maxx + 1)
main_ax.set_ylim(-maxy + 1, maxy + 1)
main_ax.set_title("Original Data")
other_axes = [plt.subplot2grid((2, 4), (i, j))
for j in range(2, 4) for i in range(2)]
for ax, scaler in zip(other_axes, [StandardScaler(), RobustScaler(),
MinMaxScaler(), Normalizer(norm='l2')]):
X_ = scaler.fit_transform(X)
ax.scatter(X_[:, 0], X_[:, 1], c=y, cmap=cm2, s=60)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_title(type(scaler).__name__)
other_axes.append(main_ax)
for ax in other_axes:
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
|
gpl-3.0
|
jpo/healthcareai-py
|
healthcareai/tests/test_model_eval.py
|
4
|
2285
|
import unittest
import numpy as np
import pandas as pd
import sklearn
import healthcareai.common.model_eval as hcai_eval
from healthcareai.common.healthcareai_error import HealthcareAIError
import healthcareai.tests.helpers as test_helpers
class TestROC(unittest.TestCase):
def test_roc(self):
df = pd.DataFrame({'a': np.repeat(np.arange(.1, 1.1, .1), 10)})
b = np.repeat(0, 100)
b[[56, 62, 63, 68, 74, 75, 76, 81, 82, 84, 85, 87, 88] + list(range(90, 100))] = 1
df['b'] = b
# ROC_AUC
result = hcai_eval.compute_roc(df['b'], df['a'])
self.assertAlmostEqual(round(result['roc_auc'], 4), 0.9433)
self.assertAlmostEqual(round(result['best_true_positive_rate'], 4), 0.9565)
self.assertAlmostEqual(round(result['best_false_positive_rate'], 4), 0.2338)
class TestPR(unittest.TestCase):
def test_pr(self):
df = pd.DataFrame({'a': np.repeat(np.arange(.1, 1.1, .1), 10)})
b = np.repeat(0, 100)
b[[56, 62, 63, 68, 74, 75, 76, 81, 82, 84, 85, 87, 88] + list(range(90, 100))] = 1
df['b'] = b
# PR_AUC
out = hcai_eval.compute_pr(df['b'], df['a'])
test_helpers.assertBetween(self, 0.8, 0.87, out['pr_auc'])
self.assertAlmostEqual(round(out['best_precision'], 4), 0.8000)
self.assertAlmostEqual(round(out['best_recall'], 4), 0.6957)
class TestPlotRandomForestFeatureImportance(unittest.TestCase):
def test_raises_error_on_non_rf_estimator(self):
linear_regressor = sklearn.linear_model.LinearRegression()
self.assertRaises(
HealthcareAIError,
hcai_eval.plot_random_forest_feature_importance,
linear_regressor,
None,
None,
save=False)
class TestValidation(unittest.TestCase):
def test_same_length_predictions_and_labels(self):
self.assertTrue(hcai_eval._validate_predictions_and_labels_are_equal_length([0, 1, 2], [1, 2, 3]))
def test_different_length_predictions_and_labels_raises_error(self):
self.assertRaises(
HealthcareAIError,
hcai_eval._validate_predictions_and_labels_are_equal_length,
[0, 1, 2],
[0, 1, 2, 3, 4])
if __name__ == '__main__':
unittest.main()
|
mit
|
googleinterns/ooxml-comparator
|
dataparser/dataParser.py
|
1
|
4678
|
import os
import json
import xmltodict
import zipfile
import pandas as pd
from pathlib import Path
import pytest
import sys
import filecmp
class OoxmlFile:
"""
    OOXML file class object that loads the various XML parts from the zipped OOXML file and
    saves them in JSON format in the corresponding path under the generated folder.
"""
def __init__(self,filename):
self.filename = filename
def load_data(self):
"""
        This function loads the OOXML file, unzips it and stores the parsed data
        of every XML part it contains in dictionaries.
"""
archive = zipfile.ZipFile(self.filename, 'r')
self.xml_files = {}
self.rel_files = {}
self.other_data_files = {}
for zip_obj in archive.infolist():
extension = str(zip_obj.filename).strip().split('.')[-1]
if extension=='xml':
with archive.open(zip_obj.filename) as myfile:
data = myfile.read()
self.xml_files[zip_obj.filename]= xmltodict.parse(data)
elif extension=='rels':
with archive.open(zip_obj.filename) as myfile:
data = myfile.read()
self.rel_files[zip_obj.filename]=xmltodict.parse(data)
else:
self.other_data_files[zip_obj.filename]=zip_obj.filename
def save_json_data(self,out_path):
"""
        This saves the loaded XML dictionaries in JSON format under the provided path.
        @Param out_path: path under which a folder is created for this OOXML file
        and the JSON output is written.
"""
path_name_decomp = self.filename.split('/')
dir_name = self.filename[:-len(path_name_decomp[-1])]
base_name = path_name_decomp[-1].replace('.','_')
dir_name = out_path
dir_name = dir_name+base_name
if not os.path.exists(dir_name):
os.mkdir(dir_name)
for xml_file in list(self.xml_files.keys()):
json_file_name = dir_name+'/'+xml_file.replace('/','_')+'.json'
with open(json_file_name,'w') as outfile:
                jData = json.dumps(self.xml_files[xml_file], indent=4)
                outfile.write(jData)
for xml_file in list(self.rel_files.keys()):
json_file_name = dir_name+'/'+xml_file.replace('/','_')+'.json'
with open(json_file_name,'w') as outfile:
jData = json.dumps(self.rel_files[xml_file],indent=4)
outfile.write(jData)
def is_valid_path(path):
    """
    Function to check whether the given path is an existing directory.
    @Param path: Path to be checked for existence.
    """
    return os.path.isdir(path)
def prepare_folder(path):
"""
    Function to create a new 'generated' folder and
    write the JSON data for every OOXML file found under the given path.
    @param path: path to the folder whose files have to be converted to JSON data
"""
path = path.strip()
if path[-1]=='/':
gen_path = path + 'generated/'
else:
gen_path = path + '/generated/'
converted_files = 0
failed_files = 0
if not os.path.exists(gen_path):
os.mkdir(gen_path)
exec_logger = open(gen_path+'exec_log.txt','w+')
    for pattern in ('*.pptx', '*.xlsx', '*.docx'):
        for cur_path in Path(path).rglob(pattern):
            try:
                exec_logger.write("Processing : "+str(cur_path)+"\n")
                cur_file = OoxmlFile(str(cur_path))
                cur_file.load_data()
                cur_file.save_json_data(gen_path)
                converted_files += 1
            except:
                failed_files += 1
                exec_logger.write("Failed to convert : "+str(cur_path)+"\n")
exec_logger.write("Number of files processed :"+str(converted_files)+"\n")
exec_logger.write("Number of conversion Failed :"+str(failed_files)+"\n")
exec_logger.write("All files in the path converted Successfully and the Json data is stored in the generated folder\n")
exec_logger.write("Location saved at : "+str(gen_path))
exec_logger.close()
if __name__ == "__main__":
cur_path = sys.argv[-1].strip()
status_logger = open('status.txt','w+')
if not is_valid_path(cur_path):
status_logger.write("Please enter a valid path\n")
else:
try:
prepare_folder(cur_path)
status_logger.write("File conversions Successful")
except:
            status_logger.write("File conversions failed.")
status_logger.close()
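# Illustrative usage sketch (not part of the original module), assuming a file
# named "example.docx" exists next to this script and an "out/" directory is
# writable; both names are placeholders:
#     doc = OoxmlFile("example.docx")
#     doc.load_data()              # unzip and parse every .xml/.rels part
#     doc.save_json_data("out/")   # one JSON file per XML part, under out/example_docx/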
|
apache-2.0
|
chenyyx/scikit-learn-doc-zh
|
examples/zh/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
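# Note: with newer scikit-learn releases the default LogisticRegression solver
# does not support the L1 penalty; there, an explicit solver such as 'liblinear'
# or 'saga' is needed for the lines above, e.g.:
#     clf_l1_LR = LogisticRegression(C=C, penalty='l1', solver='liblinear', tol=0.01)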
|
gpl-3.0
|
fanmuzhi/UFT
|
test/embedding_in_qt4.py
|
7
|
4149
|
#!/usr/bin/env python
# embedding_in_qt4.py --- Simple Qt4 application embedding matplotlib canvases
#
# Copyright (C) 2005 Florent Rougon
# 2006 Darren Dale
#
# This file is an example program for matplotlib. It may be used and
# modified with no restriction; raw copies as well as modified versions
# may be distributed without limitation.
from __future__ import unicode_literals
import sys
import os
import random
from matplotlib.backends import qt4_compat
use_pyside = qt4_compat.QT_API == qt4_compat.QT_API_PYSIDE
if use_pyside:
from PySide import QtGui, QtCore
else:
from PyQt4 import QtGui, QtCore
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
progname = os.path.basename(sys.argv[0])
progversion = "0.1"
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
self.compute_initial_figure()
#
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
self.axes.plot(t, s)
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')
def update_figure(self):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
l = [random.randint(0, 10) for i in range(4)]
self.axes.plot([0, 1, 2, 3], l, 'r')
self.draw()
class ApplicationWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("application main window")
self.file_menu = QtGui.QMenu('&File', self)
self.file_menu.addAction('&Quit', self.fileQuit,
QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.help_menu = QtGui.QMenu('&Help', self)
self.menuBar().addSeparator()
self.menuBar().addMenu(self.help_menu)
self.help_menu.addAction('&About', self.about)
self.main_widget = QtGui.QWidget(self)
l = QtGui.QVBoxLayout(self.main_widget)
sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(sc)
l.addWidget(dc)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
self.statusBar().showMessage("All hail matplotlib!", 2000)
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
def about(self):
QtGui.QMessageBox.about(self, "About",
"""embedding_in_qt4.py example
Copyright 2005 Florent Rougon, 2006 Darren Dale
This program is a simple example of a Qt4 application embedding matplotlib
canvases.
It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation."""
)
qApp = QtGui.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
#qApp.exec_()
|
gpl-3.0
|
zdary/intellij-community
|
python/helpers/pydev/_pydevd_bundle/pydevd_comm.py
|
5
|
82984
|
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                               PYDB      id                             notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
200 CMD_REDIRECT_OUTPUT JAVA streams to redirect as string -
'STDOUT' (redirect only STDOUT)
'STDERR' (redirect only STDERR)
'STDOUT STDERR' (redirect both streams)
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
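# Illustrative sketch of the quoted-line wire format described above (explanatory
# only, not part of the implementation): a version handshake could be exchanged as
#     501\t1\t1.0\n                  IDE -> daemon (CMD_VERSION, odd sequence number)
#     501\t1\t@@BUILD_NUMBER@@\n     daemon -> IDE (reply reuses the same sequence number)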
import itertools
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, \
IS_PY36_OR_GREATER, STATE_RUN, dict_keys, ASYNC_EVAL_TIMEOUT_SEC, IS_IRONPYTHON, GlobalDebuggerHolder, \
get_global_debugger, GetGlobalDebugger, set_global_debugger # Keep for backward compatibility @UnusedImport
from _pydev_bundle.pydev_override import overrides
import json
import weakref
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
if IS_IRONPYTHON:
    # redefine `unquote` for IronPython, since we use it only for logging messages, but it leads to a stack overflow (SOF) with IronPython
def unquote(s):
return s
from _pydevd_bundle import pydevd_console_integration
from _pydevd_bundle import pydevd_vars
import pydevd_tracing
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_vm_type
from _pydevd_bundle import pydevd_bytecode_utils
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, norm_file_to_client, is_real_file
import pydevd_file_utils
import os
import sys
import inspect
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs_key, to_string, \
get_non_pydevd_threads, is_pandas_container, is_numpy_container
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
try:
import cStringIO as StringIO #may not always be available @UnusedImport
except:
try:
import StringIO #@Reimport
except:
import io as StringIO
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE, PYDEV_FILE
get_file_type = DONT_TRACE.get
# CMD_XXX constants imported for backward compatibility
from _pydevd_bundle.pydevd_comm_constants import (
ID_TO_MEANING, CMD_RUN, CMD_LIST_THREADS, CMD_THREAD_CREATE, CMD_THREAD_KILL,
CMD_THREAD_SUSPEND, CMD_THREAD_RUN, CMD_STEP_INTO, CMD_STEP_OVER, CMD_STEP_RETURN, CMD_GET_VARIABLE,
CMD_SET_BREAK, CMD_REMOVE_BREAK, CMD_EVALUATE_EXPRESSION, CMD_GET_FRAME,
CMD_EXEC_EXPRESSION, CMD_WRITE_TO_CONSOLE, CMD_CHANGE_VARIABLE, CMD_RUN_TO_LINE,
CMD_RELOAD_CODE, CMD_GET_COMPLETIONS, CMD_CONSOLE_EXEC, CMD_ADD_EXCEPTION_BREAK,
CMD_REMOVE_EXCEPTION_BREAK, CMD_LOAD_SOURCE, CMD_ADD_DJANGO_EXCEPTION_BREAK,
CMD_REMOVE_DJANGO_EXCEPTION_BREAK, CMD_SET_NEXT_STATEMENT, CMD_SMART_STEP_INTO,
CMD_EXIT, CMD_SIGNATURE_CALL_TRACE, CMD_SET_PY_EXCEPTION, CMD_GET_FILE_CONTENTS,
CMD_SET_PROPERTY_TRACE, CMD_EVALUATE_CONSOLE_EXPRESSION, CMD_RUN_CUSTOM_OPERATION,
CMD_GET_BREAKPOINT_EXCEPTION, CMD_STEP_CAUGHT_EXCEPTION, CMD_SEND_CURR_EXCEPTION_TRACE,
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, CMD_IGNORE_THROWN_EXCEPTION_AT, CMD_ENABLE_DONT_TRACE,
CMD_SHOW_CONSOLE, CMD_GET_ARRAY, CMD_STEP_INTO_MY_CODE, CMD_GET_CONCURRENCY_EVENT,
CMD_SHOW_RETURN_VALUES, CMD_SET_UNIT_TEST_DEBUGGING_MODE, CMD_INPUT_REQUESTED, CMD_GET_DESCRIPTION, CMD_PROCESS_CREATED,
CMD_SHOW_CYTHON_WARNING, CMD_LOAD_FULL_VALUE, CMD_GET_THREAD_STACK, CMD_THREAD_DUMP_TO_STDERR,
CMD_STOP_ON_START, CMD_GET_EXCEPTION_DETAILS, CMD_PROCESS_CREATED_MSG_RECEIVED, CMD_PYDEVD_JSON_CONFIG,
CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, CMD_THREAD_RESUME_SINGLE_NOTIFICATION,
CMD_REDIRECT_OUTPUT, CMD_GET_NEXT_STATEMENT_TARGETS, CMD_SET_PROJECT_ROOTS, CMD_VERSION,
CMD_RETURN, CMD_SET_PROTOCOL, CMD_ERROR, CMD_GET_SMART_STEP_INTO_VARIANTS, CMD_DATAVIEWER_ACTION,)
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
filesystem_encoding_is_utf8 = file_system_encoding.lower() in ('utf-8', 'utf_8', 'utf8')
class CommunicationRole(object):
"""The class that contains the constants of roles that `PyDB` can play in
the communication with the IDE.
"""
CLIENT = 0
SERVER = 1
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
""" levels are:
0 most serious warnings/errors
1 warnings/significant events
2 informational trace
"""
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
#yes, we can have errors printing if the console of the program has been finished (and we're still trying to print something)
try:
sys.stderr.write('%s\n' % (args,))
except:
pass
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
created_pydb_daemon_threads = {}
def __init__(self, target_and_args=None):
'''
:param target_and_args:
tuple(func, args, kwargs) if this should be a function and args to run.
-- Note: use through run_as_pydevd_daemon_thread().
'''
threading.Thread.__init__(self)
self.killReceived = False
mark_as_pydevd_daemon_thread(self)
self._target_and_args = target_and_args
def run(self):
created_pydb_daemon = self.created_pydb_daemon_threads
created_pydb_daemon[self] = 1
try:
try:
if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
                    # we shouldn't update sys.modules for the main thread, because that would import the 'threading'
                    # module a second time and create a new instance of the main thread
import org.python.core as PyCore #@UnresolvedImport
ss = PyCore.PySystemState()
# Note: Py.setSystemState() affects only the current thread.
PyCore.Py.setSystemState(ss)
self._stop_trace()
self._warn_pydevd_thread_is_traced()
self._on_run()
except:
if sys is not None and traceback is not None:
traceback.print_exc()
finally:
del created_pydb_daemon[self]
def _on_run(self):
if self._target_and_args is not None:
target, args, kwargs = self._target_and_args
target(*args, **kwargs)
else:
raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
def do_kill_pydev_thread(self):
self.killReceived = True
def _stop_trace(self):
if self.pydev_do_not_trace:
pydevd_tracing.SetTrace(None) # no debugging on this thread
def _warn_pydevd_thread_is_traced(self):
if self.pydev_do_not_trace and sys.gettrace():
pydevd_log(1, "The debugger thread '%s' is traced which may lead to debugging performance issues." % self.__class__.__name__)
def mark_as_pydevd_daemon_thread(thread):
thread.pydev_do_not_trace = True
thread.is_pydev_daemon_thread = True
thread.daemon = True
def run_as_pydevd_daemon_thread(func, *args, **kwargs):
'''
Runs a function as a pydevd daemon thread (without any tracing in place).
'''
t = PyDBDaemonThread(target_and_args=(func, args, kwargs))
t.name = '%s (pydevd daemon thread)' % (func.__name__,)
t.start()
return t
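# Example usage sketch (illustrative only; `background_task` is a hypothetical
# placeholder for any callable the debugger needs to run without tracing):
#     t = run_as_pydevd_daemon_thread(background_task, arg1, key=value)
#     # the returned thread is already started and marked as a pydevd daemon thread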
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Reader")
from _pydevd_bundle.pydevd_process_net_command import process_net_command
self.process_net_command = process_net_command
self.global_debugger_holder = GlobalDebuggerHolder
def do_kill_pydev_thread(self):
#We must close the socket so that it doesn't stay halted there.
self.killReceived = True
try:
self.sock.shutdown(SHUT_RD) #shutdown the socket for read
except:
#just ignore that
pass
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
read_buffer = ""
try:
while not self.killReceived:
try:
r = self.sock.recv(1024)
except:
if not self.killReceived:
traceback.print_exc()
self.handle_except()
return #Finished communication.
#Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
#internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
#on python 2 may need to be converted to the filesystem encoding).
if hasattr(r, 'decode'):
r = r.decode('utf-8')
read_buffer += r
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
sys.stderr.write(u'debugger: received >>%s<<\n' % (read_buffer,))
sys.stderr.flush()
if len(read_buffer) == 0:
self.handle_except()
break
while read_buffer.find(u'\n') != -1:
command, read_buffer = read_buffer.split(u'\n', 1)
args = command.split(u'\t', 2)
try:
cmd_id = int(args[0])
pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
self.process_command(cmd_id, int(args[1]), args[2])
except:
traceback.print_exc()
sys.stderr.write("Can't process net command: %s\n" % command)
sys.stderr.flush()
except:
traceback.print_exc()
self.handle_except()
def handle_except(self):
self.global_debugger_holder.global_dbg.finish_debugging_session()
def process_command(self, cmd_id, seq, text):
self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Writer")
self.cmdQueue = _queue.Queue()
if pydevd_vm_type.get_vm_type() == 'python':
self.timeout = 0
else:
self.timeout = 0.1
def add_command(self, cmd):
""" cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody dies
self.cmdQueue.put(cmd)
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
""" just loop and write responses """
try:
while True:
try:
try:
cmd = self.cmdQueue.get(1, 0.1)
except _queue.Empty:
if self.killReceived:
try:
self.sock.shutdown(SHUT_WR)
self.sock.close()
except:
pass
return #break if queue is empty and killReceived
else:
continue
except:
#pydevd_log(0, 'Finishing debug communication...(1)')
#when liberating the thread here, we could have errors because we were shutting down
#but the thread was still not liberated
return
cmd.send(self.sock)
if cmd.id == CMD_EXIT:
break
if time is None:
break #interpreter shutdown
time.sleep(self.timeout)
except Exception:
GlobalDebuggerHolder.global_dbg.finish_debugging_session()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
traceback.print_exc()
def empty(self):
return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
""" binds to a port, waits for the debugger to connect """
s = socket(AF_INET, SOCK_STREAM)
s.settimeout(None)
try:
from socket import SO_REUSEPORT
s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
except ImportError:
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(('', port))
pydevd_log(1, "Bound to port ", str(port))
try:
s.listen(1)
newSock, _addr = s.accept()
pydevd_log(1, "Connection accepted")
        # the server socket is no longer needed once the connection has been accepted, so shut it down and close it
s.shutdown(SHUT_RDWR)
s.close()
return newSock
except:
sys.stderr.write("Could not bind to port: %s\n" % (port,))
sys.stderr.flush()
traceback.print_exc()
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
""" connects to a host/port """
pydevd_log(1, "Connecting to ", host, ":", str(port))
s = socket(AF_INET, SOCK_STREAM)
# Set TCP keepalive on an open socket.
    # It activates after 1 second (TCP_KEEPIDLE) of idleness,
    # then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL),
    # and closes the connection after 5 failed pings (TCP_KEEPCNT), i.e. after roughly 15 seconds
try:
from socket import IPPROTO_TCP, SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
s.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1)
s.setsockopt(IPPROTO_TCP, TCP_KEEPIDLE, 1)
s.setsockopt(IPPROTO_TCP, TCP_KEEPINTVL, 3)
s.setsockopt(IPPROTO_TCP, TCP_KEEPCNT, 5)
except ImportError:
pass # May not be available everywhere.
try:
s.settimeout(10) # 10 seconds timeout
s.connect((host, port))
s.settimeout(None) # no timeout after connected
pydevd_log(1, "Connected.")
return s
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
sys.stderr.flush()
traceback.print_exc()
raise
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
""" Commands received/sent over the network.
    A command can represent one received from the debugger,
    or one to be sent by the daemon.
"""
next_seq = 0 # sequence numbers
# Protocol where each line is a new message (text is quoted to prevent new lines).
QUOTED_LINE_PROTOCOL = 'quoted-line'
# Uses http protocol to provide a new message.
# i.e.: Content-Length:xxx\r\n\r\npayload
HTTP_PROTOCOL = 'http'
protocol = QUOTED_LINE_PROTOCOL
_showing_debug_info = 0
_show_debug_info_lock = threading.RLock()
def __init__(self, cmd_id, seq, text):
"""
If sequence is 0, new sequence will be generated (otherwise, this was the response
to a command from the client).
"""
self.id = cmd_id
if seq == 0:
NetCommand.next_seq += 2
seq = NetCommand.next_seq
self.seq = seq
if IS_PY2:
if isinstance(text, unicode):
text = text.encode('utf-8')
else:
assert isinstance(text, str)
else:
assert isinstance(text, str)
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
self._show_debug_info(cmd_id, seq, text)
if self.protocol == self.HTTP_PROTOCOL:
msg = '%s\t%s\t%s\n' % (cmd_id, seq, text)
else:
encoded = quote(to_string(text), '/<>_=" \t')
msg = '%s\t%s\t%s\n' % (cmd_id, seq, encoded)
if IS_PY2:
assert isinstance(msg, str) # i.e.: bytes
as_bytes = msg
else:
if isinstance(msg, str):
msg = msg.encode('utf-8')
assert isinstance(msg, bytes)
as_bytes = msg
self._as_bytes = as_bytes
def send(self, sock):
as_bytes = self._as_bytes
if self.protocol == self.HTTP_PROTOCOL:
sock.sendall(('Content-Length: %s\r\n\r\n' % len(as_bytes)).encode('ascii'))
sock.sendall(as_bytes)
@classmethod
def _show_debug_info(cls, cmd_id, seq, text):
with cls._show_debug_info_lock:
# Only one thread each time (rlock).
if cls._showing_debug_info:
# avoid recursing in the same thread (just printing could create
# a new command when redirecting output).
return
cls._showing_debug_info += 1
try:
out_message = 'sending cmd --> '
out_message += "%20s" % ID_TO_MEANING.get(str(cmd_id), 'UNKNOWN')
out_message += ' '
out_message += text.replace('\n', ' ')
try:
sys.stderr.write('%s\n' % (out_message,))
except:
pass
finally:
cls._showing_debug_info -= 1
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText
def make_error_message(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
sys.stderr.write("Error: %s" % (text,))
return cmd
def make_protocol_set_message(self, seq):
return NetCommand(CMD_SET_PROTOCOL, seq, '')
def make_thread_created_message(self, thread):
cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_process_created_message(self):
cmdText = '<process/>'
return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)
def make_show_warning_message(self, message_id):
try:
cmdText = '<xml><warning id="%s" /></xml>' % message_id
return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, cmdText)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_custom_frame_created_message(self, frameId, frameDescription):
frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_list_threads_message(self, seq):
""" returns thread listing as XML """
try:
threads = get_non_pydevd_threads()
cmd_text = ["<xml>"]
append = cmd_text.append
for thread in threads:
if is_thread_alive(thread):
append(self._thread_to_xml(thread))
append("</xml>")
return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_thread_stack_message(self, seq, thread_id, topmost_frame, must_be_suspended=False):
"""
Returns thread stack as XML.
        :param must_be_suspended: If True and the thread is not suspended, returns None.
"""
try:
# If frame is None, the return is an empty frame list.
cmd_text = ['<xml><thread id="%s">' % (thread_id,)]
if topmost_frame is not None:
try:
# Note: if we detect that we're already stopped in a given place within
# the debugger, use that stack instead of creating a new one with the
# current position (this is needed because when an uncaught exception
# is reported for a given frame we are actually stopped in a different
# place within the debugger).
frame = topmost_frame
thread_stack_str = ''
while frame is not None:
if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
thread_stack_str = frame.f_locals.get('thread_stack_str')
break
frame = frame.f_back
else:
# Could not find stack of suspended frame...
if must_be_suspended:
return None
cmd_text.append(thread_stack_str or self.make_thread_stack_str(topmost_frame))
finally:
topmost_frame = None
cmd_text.append('</thread></xml>')
return NetCommand(CMD_GET_THREAD_STACK, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_variable_changed_message(self, seq, payload):
# notify debugger that value was changed successfully
return NetCommand(CMD_RETURN, seq, payload)
def make_io_message(self, v, ctx):
'''
@param v: the message to pass to the debug server
@param ctx: 1 for stdio 2 for stderr
'''
try:
if len(v) > MAX_IO_MSG_SIZE:
v = v[0:MAX_IO_MSG_SIZE]
v += '...'
v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= '))
return NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_version_message(self, seq):
try:
return NetCommand(CMD_VERSION, seq, VERSION_STRING)
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_thread_killed_message(self, id):
try:
return NetCommand(CMD_THREAD_KILL, 0, str(id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_stack_str(self, frame, frame_to_lineno=None):
'''
:param frame_to_lineno:
If available, the line number for the frame will be gotten from this dict,
otherwise frame.f_lineno will be used (needed for unhandled exceptions as
the place where we report may be different from the place where it's raised).
'''
if frame_to_lineno is None:
frame_to_lineno = {}
make_valid_xml_value = pydevd_xml.make_valid_xml_value
cmd_text_list = []
append = cmd_text_list.append
curr_frame = frame
frame = None # Clear frame reference
try:
while curr_frame:
my_id = id(curr_frame)
if curr_frame.f_code is None:
break # Iron Python sometimes does not have it!
method_name = curr_frame.f_code.co_name # method name (if in method) or ? if global
if method_name is None:
break # Iron Python sometimes does not have it!
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(curr_frame)
if get_file_type(abs_path_real_path_and_base[2]) == PYDEV_FILE:
# Syntax errors are a special case in which we don't want to skip the debugger files.
# When a syntax error happens, we stop either in the `execfile` or `_exec` function.
exception_info, is_syntax_error = curr_frame.f_locals.get('__exception__'), False
if exception_info:
is_syntax_error = exception_info[0] is SyntaxError
if not is_syntax_error:
# Skip pydevd files.
curr_frame = curr_frame.f_back
continue
my_file = abs_path_real_path_and_base[0]
if is_real_file(my_file):
# if filename is Jupyter cell id
my_file = pydevd_file_utils.norm_file_to_client(abs_path_real_path_and_base[0])
if file_system_encoding.lower() != "utf-8" and hasattr(my_file, "decode"):
# my_file is a byte string encoded using the file system encoding
# convert it to utf8
my_file = my_file.decode(file_system_encoding).encode("utf-8")
#print "file is ", my_file
#my_file = inspect.getsourcefile(curr_frame) or inspect.getfile(frame)
lineno = frame_to_lineno.get(curr_frame, curr_frame.f_lineno)
# print("line is ", lineno)
# Note: variables are all gotten 'on-demand'.
append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(method_name)))
append('file="%s" line="%s">' % (make_valid_xml_value(my_file), lineno))
append("</frame>")
curr_frame = curr_frame.f_back
except:
traceback.print_exc()
curr_frame = None # Clear frame reference
return ''.join(cmd_text_list)
def make_thread_suspend_str(
self,
thread_id,
frame,
stop_reason=None,
message=None,
suspend_type="trace",
frame_to_lineno=None
):
"""
:return tuple(str,str):
Returns tuple(thread_suspended_str, thread_stack_str).
i.e.:
(
'''
<xml>
<thread id="id" stop_reason="reason">
<frame id="id" name="functionName " file="file" line="line">
</frame>
</thread>
</xml>
'''
,
'''
<frame id="id" name="functionName " file="file" line="line">
</frame>
'''
)
"""
make_valid_xml_value = pydevd_xml.make_valid_xml_value
cmd_text_list = []
append = cmd_text_list.append
cmd_text_list.append('<xml>')
if message:
message = make_valid_xml_value(message)
append('<thread id="%s"' % (thread_id,))
if stop_reason is not None:
append(' stop_reason="%s"' % (stop_reason,))
if message is not None:
append(' message="%s"' % (message,))
if suspend_type is not None:
append(' suspend_type="%s"' % (suspend_type,))
append('>')
thread_stack_str = self.make_thread_stack_str(frame, frame_to_lineno)
append(thread_stack_str)
append("</thread></xml>")
return ''.join(cmd_text_list), thread_stack_str
def make_thread_suspend_message(self, thread_id, frame, stop_reason, message, suspend_type, frame_to_lineno=None):
try:
thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(
thread_id, frame, stop_reason, message, suspend_type, frame_to_lineno=frame_to_lineno)
cmd = NetCommand(CMD_THREAD_SUSPEND, 0, thread_suspend_str)
cmd.thread_stack_str = thread_stack_str
cmd.thread_suspend_str = thread_suspend_str
return cmd
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_suspend_single_notification(self, thread_id, stop_reason):
try:
return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, json.dumps(
{'thread_id': thread_id, 'stop_reason': stop_reason}))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_resume_single_notification(self, thread_id):
try:
return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, json.dumps(
{'thread_id': thread_id}))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_run_message(self, thread_id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, "%s\t%s" % (thread_id, reason))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_get_variable_message(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_array_message(self, seq, payload):
try:
return NetCommand(CMD_GET_ARRAY, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_successful_dataviewer_action_message(self, seq, payload):
try:
return NetCommand(CMD_DATAVIEWER_ACTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_description_message(self, seq, payload):
try:
return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_frame_message(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_evaluate_expression_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_completions_message(self, seq, payload):
try:
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_file_contents(self, seq, payload):
try:
return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_breakpoint_exception_message(self, seq, payload):
try:
return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def _make_send_curr_exception_trace_str(self, thread_id, exc_type, exc_desc, trace_obj):
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown'
exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description'
thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(
thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
return exc_type, exc_desc, thread_suspend_str, thread_stack_str
def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
try:
exc_type, exc_desc, thread_suspend_str, _thread_stack_str = self._make_send_curr_exception_trace_str(
thread_id, exc_type, exc_desc, trace_obj)
payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + thread_suspend_str
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_exception_details_message(self, seq, thread_id, topmost_frame):
"""Returns exception details as XML """
try:
# If the debugger is not suspended, just return the thread and its id.
cmd_text = ['<xml><thread id="%s" ' % (thread_id,)]
if topmost_frame is not None:
try:
frame = topmost_frame
topmost_frame = None
while frame is not None:
if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
arg = frame.f_locals.get('arg', None)
if arg is not None:
exc_type, exc_desc, _thread_suspend_str, thread_stack_str = self._make_send_curr_exception_trace_str(
thread_id, *arg)
cmd_text.append('exc_type="%s" ' % (exc_type,))
cmd_text.append('exc_desc="%s" ' % (exc_desc,))
cmd_text.append('>')
cmd_text.append(thread_stack_str)
break
frame = frame.f_back
else:
cmd_text.append('>')
finally:
frame = None
cmd_text.append('</thread></xml>')
return NetCommand(CMD_GET_EXCEPTION_DETAILS, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
try:
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_send_console_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_custom_operation_message(self, seq, payload):
try:
return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_load_source_message(self, seq, source, dbg=None):
try:
net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
def make_show_console_message(self, thread_id, frame):
try:
thread_suspended_str, _thread_stack_str = self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, '')
return NetCommand(CMD_SHOW_CONSOLE, 0, thread_suspended_str)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_input_requested_message(self, started):
try:
return NetCommand(CMD_INPUT_REQUESTED, 0, str(started))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):
try:
message = str(is_success) + '\t' + exception_msg
return NetCommand(CMD_SET_NEXT_STATEMENT, int(seq), message)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_load_full_value_message(self, seq, payload):
try:
return NetCommand(CMD_LOAD_FULL_VALUE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_exit_message(self):
try:
net = NetCommand(CMD_EXIT, 0, '')
except:
net = self.make_error_message(0, get_exception_traceback_str())
return net
def make_get_next_statement_targets_message(self, seq, payload):
try:
return NetCommand(CMD_GET_NEXT_STATEMENT_TARGETS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
""" internal commands are generated/executed by the debugger.
The reason for their existence is that some commands have to be executed
    on specific threads. These are the InternalThreadCommands that
    get posted to PyDB.cmdQueue.
"""
def __init__(self, thread_id):
self.thread_id = thread_id
def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id)
def do_it(self, dbg):
raise NotImplementedError("you have to override do_it")
class ReloadCodeCommand(InternalThreadCommand):
def __init__(self, module_name, thread_id):
self.thread_id = thread_id
self.module_name = module_name
self.executed = False
self.lock = thread.allocate_lock()
def can_be_executed_by(self, thread_id):
if self.thread_id == '*':
return True #Any thread can execute it!
return InternalThreadCommand.can_be_executed_by(self, thread_id)
def do_it(self, dbg):
self.lock.acquire()
try:
if self.executed:
return
self.executed = True
finally:
self.lock.release()
module_name = self.module_name
if module_name not in sys.modules:
if '.' in module_name:
new_module_name = module_name.split('.')[-1]
if new_module_name in sys.modules:
module_name = new_module_name
if module_name not in sys.modules:
sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
# Too much info...
# sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
else:
sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
from _pydevd_bundle import pydevd_reload
if pydevd_reload.xreload(sys.modules[module_name]):
sys.stderr.write('pydev debugger: reload finished\n')
else:
sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalGetThreadStack
#=======================================================================================================================
class InternalGetThreadStack(InternalThreadCommand):
'''
This command will either wait for a given thread to be paused to get its stack or will provide
it anyways after a timeout (in which case the stack will be gotten but local variables won't
be available and it'll not be possible to interact with the frame as it's not actually
stopped in a breakpoint).
'''
def __init__(self, seq, thread_id, py_db, set_additional_thread_info, timeout=.5):
InternalThreadCommand.__init__(self, thread_id)
self._py_db = weakref.ref(py_db)
self._timeout = time.time() + timeout
self.seq = seq
self._cmd = None
# Note: receives set_additional_thread_info to avoid a circular import
# in this module.
self._set_additional_thread_info = set_additional_thread_info
@overrides(InternalThreadCommand.can_be_executed_by)
def can_be_executed_by(self, _thread_id):
timed_out = time.time() >= self._timeout
py_db = self._py_db()
t = pydevd_find_thread_by_id(self.thread_id)
frame = None
if t and not getattr(t, 'pydev_do_not_trace', None):
additional_info = self._set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
self._cmd = py_db.cmd_factory.make_get_thread_stack_message(
self.seq, self.thread_id, frame, must_be_suspended=not timed_out)
finally:
frame = None
t = None
return self._cmd is not None or timed_out
@overrides(InternalThreadCommand.do_it)
def do_it(self, dbg):
if self._cmd is not None:
dbg.writer.add_command(self._cmd)
self._cmd = None
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = -1
t.additional_info.pydev_step_stop = None
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id):
self.thread_id = thread_id
self.cmd_id = cmd_id
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id, line, func_name, seq=0):
self.thread_id = thread_id
self.cmd_id = cmd_id
self.line = line
self.seq = seq
if IS_PY2:
if isinstance(func_name, unicode):
# On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
func_name = func_name.encode('utf-8')
self.func_name = func_name
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_next_line = int(self.line)
t.additional_info.pydev_func_name = self.func_name
t.additional_info.pydev_state = STATE_RUN
t.additional_info.pydev_message = str(self.seq)
class InternalSmartStepInto(InternalThreadCommand):
def __init__(self, thread_id, frame_id, cmd_id, func_name, line, call_order, start_line, end_line, seq=0):
self.thread_id = thread_id
self.cmd_id = cmd_id
self.line = line
self.start_line = start_line
self.end_line = end_line
self.seq = seq
self.call_order = call_order
if IS_PY2:
if isinstance(func_name, unicode):
# On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
func_name = func_name.encode('utf-8')
self.func_name = func_name
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_next_line = int(self.line)
t.additional_info.pydev_func_name = self.func_name
t.additional_info.pydev_state = STATE_RUN
t.additional_info.pydev_message = str(self.seq)
t.additional_info.pydev_smart_step_context.call_order = int(self.call_order)
t.additional_info.pydev_smart_step_context.start_line = int(self.start_line)
t.additional_info.pydev_smart_step_context.end_line = int(self.end_line)
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attributes = attrs
def do_it(self, dbg):
""" Converts request into python variable """
try:
xml = StringIO.StringIO()
xml.write("<xml>")
_typeName, val_dict = pydevd_vars.resolve_compound_variable_fields(self.thread_id, self.frame_id, self.scope, self.attributes)
if val_dict is None:
val_dict = {}
# assume properly ordered if resolver returns 'OrderedDict'
# check type as string to support OrderedDict backport for older Python
keys = dict_keys(val_dict)
if not (_typeName == "OrderedDict" or val_dict.__class__.__name__ == "OrderedDict" or IS_PY36_OR_GREATER):
keys.sort(key=compare_object_attrs_key)
for k in keys:
val = val_dict[k]
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_xml.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
xml.write("</xml>")
cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml.getvalue())
xml.close()
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(
self.sequence, "Error resolving variables %s" % (get_exception_traceback_str(),))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.name = attrs.split("\t")[-1]
self.attrs = attrs
self.roffset = int(roffset)
self.coffset = int(coffset)
self.rows = int(rows)
self.cols = int(cols)
self.format = format
def do_it(self, dbg):
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
xml = pydevd_vars.table_like_struct_to_xml(var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format)
cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalDataViewerAction
#=======================================================================================================================
class InternalDataViewerAction(InternalThreadCommand):
def __init__(self, sequence, thread_id, frame_id, var, action, args):
self.sequence = sequence
self.thread_id = thread_id
self.frame_id = frame_id
self.var = var
self.action = action
self.args = args
def do_it(self, dbg):
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
tmp_var = pydevd_vars.eval_in_context(self.var, frame.f_globals, frame.f_locals)
self.act(tmp_var, self.action, self.args)
cmd = dbg.cmd_factory.make_successful_dataviewer_action_message(
self.sequence,
"Successful execution")
dbg.writer.add_command(cmd)
except Exception as e:
cmd = dbg.cmd_factory.make_error_message(
self.sequence,
type(e).__name__ + "\nError exporting frame: " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
@staticmethod
def act(tmp_var, action, args):
if action == 'EXPORT':
return InternalDataViewerAction.export_action(tmp_var, args)
@staticmethod
def get_type_info(var):
tp = type(var)
tp_name = tp.__name__
tp_qualifier = getattr(tp, "__module__", "")
return tp_qualifier, tp_name
@staticmethod
def export_action(var, args):
# args: (filepath)
filepath = args[0]
extension = filepath.rsplit('.', 1)[1].lower()
tp_qualifier, tp_name = InternalDataViewerAction.get_type_info(var)
if is_pandas_container(tp_qualifier, tp_name, var):
if extension in ('csv', 'tsv'):
delim = ',' if extension == 'csv' else '\t'
var.to_csv(filepath, sep=delim)
else:
raise AttributeError("Format '{}' is not supported".format(extension))
elif is_numpy_container(tp_qualifier, tp_name, var):
try:
import numpy as np
except ImportError:
# Strange. We have an instance of numpy array but we failed to import numpy
raise
if extension in ('csv', 'tsv'):
delim = ',' if extension == 'csv' else '\t'
np.savetxt(filepath, var, fmt="%s", delimiter=delim)
else:
raise AttributeError("Format '{}' is not supported".format(extension))
else:
raise AttributeError("Type {} is not supported".format(type(var)))
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
""" changes the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attr = attr
self.expression = expression
def do_it(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_variable_changed_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
def do_it(self, dbg):
""" Converts request into python variable """
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
hidden_ns = pydevd_console_integration.get_ipython_hidden_vars()
xml = "<xml>"
xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml)
dbg.writer.add_command(cmd)
else:
#pydevd_vars.dump_frames(self.thread_id)
#don't print this error: frame not found: means that the client is not synchronized (but that's ok)
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
class InternalGetSmartStepIntoVariants(InternalThreadCommand):
def __init__(self, seq, thread_id, frame_id, start_line, end_line):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.start_line = int(start_line)
self.end_line = int(end_line)
def do_it(self, dbg):
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
variants = pydevd_bytecode_utils.calculate_smart_step_into_variants(frame, self.start_line, self.end_line)
xml = "<xml>"
for name, is_visited in variants:
xml += '<variant name="%s" isVisited="%s"></variant>' % (quote(name), str(is_visited).lower())
xml += "</xml>"
cmd = NetCommand(CMD_GET_SMART_STEP_INTO_VARIANTS, self.sequence, xml)
dbg.writer.add_command(cmd)
except:
pydevd_log(1, traceback.format_exc())
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error getting smart step into veriants for frame: %s from thread: %s"
% (self.frame_id, self.thread_id))
self._reset_smart_step_context()
dbg.writer.add_command(cmd)
def _reset_smart_step_context(self):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
try:
t.additional_info.pydev_smart_step_context.reset()
except:
pydevd_log(1, "Error while resetting smart step into context for thread %s" % self.thread_id)
#=======================================================================================================================
# InternalGetNextStatementTargets
#=======================================================================================================================
class InternalGetNextStatementTargets(InternalThreadCommand):
""" gets the valid line numbers for use with set next statement """
def __init__(self, seq, thread_id, frame_id):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
def do_it(self, dbg):
""" Converts request into set of line numbers """
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
code = frame.f_code
xml = "<xml>"
if hasattr(code, 'co_lnotab'):
lineno = code.co_firstlineno
lnotab = code.co_lnotab
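                    # co_lnotab (CPython < 3.10) packs pairs of bytes:
                    # (bytecode-offset increment, line-number increment). Taking every
                    # second byte below picks out the line increments, which are summed
                    # onto co_firstlineno to enumerate the lines that actually own
                    # bytecode, i.e. the valid "set next statement" targets.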
for i in itertools.islice(lnotab, 1, len(lnotab), 2):
if isinstance(i, int):
lineno = lineno + i
else:
# in python 2 elements in co_lnotab are of type str
lineno = lineno + ord(i)
xml += "<line>%d</line>" % (lineno,)
else:
xml += "<line>%d</line>" % (frame.f_lineno,)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_next_statement_targets_message(self.sequence, xml)
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
self.doExec = doExec
self.doTrim = doTrim
self.temp_name = temp_name
def do_it(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
if self.temp_name != "":
pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, self.expression, self.doTrim)
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
""" Gets the completions in a given scope """
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
""" Converts request into completions """
try:
remove_path = None
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
""" Fetch the variable description stub from the debug console
"""
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
""" Get completions and write back to the client
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
description = pydevd_console.get_description(frame, self.thread_id, self.frame_id, self.expression)
description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
cmd = dbg.cmd_factory.make_get_description_message(self.sequence, description_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching description" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
""" Send details of exception raised while evaluating conditional breakpoint """
def __init__(self, thread_id, exc_type, stacktrace):
self.sequence = 0
self.thread_id = thread_id
self.stacktrace = stacktrace
self.exc_type = exc_type
def do_it(self, dbg):
try:
callstack = "<xml>"
makeValid = pydevd_xml.make_valid_xml_value
for filename, line, methodname, methodobj in self.stacktrace:
if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
# filename is a byte string encoded using the file system encoding
# convert it to utf8
filename = filename.decode(file_system_encoding).encode("utf-8")
callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
% (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
callstack += "</xml>"
cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
""" Send details of the exception that was caught and where we've broken in.
"""
def __init__(self, thread_id, arg, curr_frame_id):
'''
:param arg: exception type, description, traceback object
'''
self.sequence = 0
self.thread_id = thread_id
self.curr_frame_id = curr_frame_id
self.arg = arg
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
del self.arg
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
""" Send details of the exception that was caught and where we've broken in.
"""
def __init__(self, thread_id):
self.sequence = 0
self.thread_id = thread_id
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
""" Execute the given command in the debug console """
def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.line = line
self.buffer_output = buffer_output
def do_it(self, dbg):
""" Create an XML for console output, error and more (true/false)
<xml>
<output message=output_message></output>
<error message=error_message></error>
<more>true/false</more>
</xml>
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
console_message = pydevd_console.execute_console_command(
frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
else:
from _pydevd_bundle.pydevd_console import ConsoleMessage
console_message = ConsoleMessage()
console_message.add_console_message(
pydevd_console.CONSOLE_ERROR,
"Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
)
cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
""" Run a custom command on an expression
"""
def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attrs = attrs
self.style = style
self.code_or_file = unquote_plus(encoded_code_or_file)
self.fnname = fnname
def do_it(self, dbg):
try:
res = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs,
self.style, self.code_or_file, self.fnname)
resEncoded = quote_plus(res)
cmd = dbg.cmd_factory.make_custom_operation_message(self.sequence, resEncoded)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
""" Fetch the completions in the debug console
"""
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
""" Get completions and write back to the client
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
completions_xml = pydevd_console.get_completions(frame, self.act_tok)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
""" Converts request into python variable """
try:
try:
#don't trace new threads created by console command
disable_trace_thread_modules()
result = pydevd_console_integration.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
dbg.writer.add_command(cmd)
finally:
enable_trace_thread_modules()
sys.stderr.flush()
sys.stdout.flush()
#=======================================================================================================================
# InternalLoadFullValue
#=======================================================================================================================
class InternalLoadFullValue(InternalThreadCommand):
"""
Loads values asynchronously
"""
def __init__(self, seq, thread_id, frame_id, vars):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.vars = vars
def do_it(self, dbg):
"""Starts a thread that will load values asynchronously"""
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd)
class AbstractGetValueAsyncThread(PyDBDaemonThread):
"""
    Abstract base class for a thread that evaluates variable values asynchronously.
"""
def __init__(self, frame_accessor, seq, var_objects):
PyDBDaemonThread.__init__(self)
self.frame_accessor = frame_accessor
self.seq = seq
self.var_objs = var_objects
self.cancel_event = threading.Event()
def send_result(self, xml):
raise NotImplementedError()
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
start = time.time()
xml = StringIO.StringIO()
xml.write("<xml>")
for (var_obj, name) in self.var_objs:
current_time = time.time()
if current_time - start > ASYNC_EVAL_TIMEOUT_SEC or self.cancel_event.is_set():
break
xml.write(pydevd_xml.var_to_xml(var_obj, name, evaluate_full_value=True))
xml.write("</xml>")
self.send_result(xml)
xml.close()
class GetValueAsyncThreadDebug(AbstractGetValueAsyncThread):
"""
    A thread that evaluates values asynchronously and returns the result to the debugger:
    it creates the message and sends it via the writer thread.
"""
def send_result(self, xml):
if self.frame_accessor is not None:
cmd = self.frame_accessor.cmd_factory.make_load_full_value_message(self.seq, xml.getvalue())
self.frame_accessor.writer.add_command(cmd)
class GetValueAsyncThreadConsole(AbstractGetValueAsyncThread):
"""
    A thread that evaluates values asynchronously and returns the result to the Console:
    it sends the result directly to the Console's server.
"""
def send_result(self, xml):
if self.frame_accessor is not None:
self.frame_accessor.ReturnFullValue(self.seq, xml.getvalue())
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
try:
# there was a deadlock here when I did not remove the tracing function when thread was dead
threads = threading.enumerate()
for i in threads:
tid = get_thread_id(i)
if thread_id == tid or thread_id.endswith('|' + tid):
return i
# This can happen when a request comes for a thread which was previously removed.
pydevd_log(1, "Could not find thread %s\n" % thread_id)
pydevd_log(1, "Available: %s\n" % [get_thread_id(t) for t in threads])
except:
traceback.print_exc()
return None
|
apache-2.0
|
rohanp/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
1
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of agglomerative
clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
    # super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
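    # Rescale the embedding to the unit square, then draw each sample as its
    # digit label at its embedded position, coloured by the cluster it was
    # assigned to.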
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.LaplacianEigenmap(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
bsd-3-clause
|
potash/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
87
|
3903
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
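# The scaling above fixes the signal-to-noise ratio: ||y|| / ||noise_coef * noise||
# equals exp(snr / 20.) (an SNR-style target, note it uses exp rather than 10**(snr/20)).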
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
aflaxman/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
53
|
4096
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
# #############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
# #############################################################################
# Compute clustering with KMeans
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
# #############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
# #############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
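# order[k] is the index of the MiniBatchKMeans center closest to KMeans center k,
# so the two labelings can be compared cluster by cluster below.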
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the `different` mask to all False (no point has label 4, since
# there are only 3 clusters)
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
garibaldu/radioblobs
|
code/code_1d/with_skewed_gen_gauss/score_DirMult.py
|
1
|
16149
|
import numpy as np
import numpy.random as rng
import pylab as pl
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mp
import copy, sys
import math
import optparse
import scipy.signal
import scipy.special as sp  # provides psi (digamma); gammaln is used via scipy.special below
import scipy.optimize as sop
source_to_background_ratio = np.log(0.1/0.9)
def sigm(t):
return 1.0/(1.0+ np.exp(-t))
def skew_gen_gauss(x,mid):
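    # Shape of a fake source: a generalized Gaussian envelope
    #   beta / (2 * alpha * Gamma(1/beta)) * exp(-(|x - mid| / alpha)**beta)
    # with random shape beta and scale alpha, skewed to one side by a logistic
    # factor sigm(eps * (x - mid)) with random eps, then rescaled to a random
    # peak height in [0.5, 5).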
dev = x - mid
beta, alpha = (5.-0.5)*rng.random()+0.5, 8.*rng.random()+ 6.
ggd = beta/(2*alpha*math.gamma(1.0/beta)) * np.exp(-np.power(np.abs(dev)/alpha, beta))
shape = ggd * sigm(rng.normal()*dev)
height = (5-0.5)*rng.random()+0.5
shape = height * shape/shape.max()
return shape
#TODO: random seed?
def make_dirichlet_bins(data,num_bins,strategy,num_dirs=50,alpha=10.,stretch_factor=None,total_alpha=None,safety_gap=np.inf):
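    # Returns a (num_dirs, num_bins + 1) array of bin borders: for each of the
    # num_dirs draws, the interior borders are placed according to a sample from
    # a symmetric Dirichlet(alpha, ..., alpha), either by occupancy ('eqocc') or
    # by width ('width'); the outer borders are then pushed out by safety_gap.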
z = copy.copy(data)
z.sort()
top, bottom = z[-1], z[0]
alphas = [alpha]*num_bins #can only do eqocc and width for now
dirs = rng.dirichlet(alphas,num_dirs)
mybins = np.zeros((num_dirs,num_bins+1))
mybins[:,0] = bottom
mybins[:,-1] = top
if strategy == 'eqocc': #(roughly) equal occupancies
num_datapts = z.size
for d in range(dirs.shape[0]):
props = (np.cumsum(dirs[d])*num_datapts)[:-1]
            for p in range(len(props)):
                idx = int(props[p])  # cumulative proportion gives a fractional position; index with an int
                mybins[d,p+1] = (z[idx] + z[idx+1])/2
elif strategy == 'width': #(roughly) equal width
datarange = top - bottom
for d in range(dirs.shape[0]):
props = np.cumsum(dirs[d])[:-1]
for p in range(len(props)):
mybins[d,p+1] = props[p] * datarange
elif strategy == 'expocc':
print "strategy expocc not implemented for dirichlet bins yet"
sys.exit(-1)
elif strategy == 'dexpocc':
print "strategy dexpocc not implemented for dirichlet bins yet"
sys.exit(-1)
else:
sys.exit('Not a valid binning strategy')
#safety gap
mybins[:,0] -= safety_gap
mybins[:,-1] += safety_gap
#return bin borders
return mybins
def make_bin_borders(data,num_bins,strategy='eqocc',safety_gap=np.inf,fname=None,prop=0.5):
z = copy.copy(data)
z.sort()
top, bottom = z[-1], z[0]
mybins = []
if strategy == 'eqocc': #Equal occupancies
step = len(z)/num_bins
for i in range(0,len(z)-step+1,step):
mybins.append(z[i])
mybins.append(z[-1]) # ie. these are really bin BORDERS.
elif strategy == 'width': #Equal width
step = (top-bottom)/(num_bins+0.1)
mybins = [bottom + x*step for x in range(0, num_bins)]
mybins.append(z[-1]) # the last one.
elif strategy == 'expocc':
# This binning strategy places fewer pixels in each successive
# bin by a constant multiplicative factor (eg. a half), so it
# gives exponentially decreasing occupancy. BUT NOTE: with
# #bins set, AND the factor set, the final bin size CAN'T be.
i=0
magic_fraction = prop
index = 0
while len(mybins)<num_bins:
mybins.append(z[index])
            index = min(index + int(math.ceil(magic_fraction * (len(z) - index))), len(z) - 1)
mybins.append(z[-1]) # ie. these are really bin BORDERS.
elif strategy == 'dexpocc':
# As for 'expocc' but the size of the data and the proportion determine
# num of bins (num bins can't be set by user)
num = z.size
last = 0
mybins.append(z[0])
while num > 0:
n = math.ceil(num*prop)
mybins.append(z[last+n-1])
last += n
num -= n
elif strategy == 'fromfile':
if fname == None:
sys.exit('Please supply a file name')
else:
mybins = np.genfromtxt(fname)
else:
sys.exit('Not a valid binning strategy')
# Now ensure the borders are big enough to catch new data that's out-of-range.
mybins[-1] += safety_gap
mybins[0] -= safety_gap
return mybins
def get_BG(fname):
"""Get background alpha vector from LDA output"""
CWT = np.delete(np.delete(np.genfromtxt(fname,comments='#'),0,1),0,0)
#"biggest" topic is background (return this as background alpha vector)
t0 = CWT[:,0]
t1 = CWT[:,1]
if np.sum(t0) > np.sum(t1):
return t0
else:
return t1
def make_alphaBG(BINS,N,Z,dirichlet):
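    # Returns (Cxk, alpha_BG): Cxk is a K x N matrix whose entry [k, x] is 1 when
    # pixel x of Z falls in bin k (a fractional average over the Dirichlet bin
    # draws when `dirichlet` is set), and alpha_BG is the background pseudo-count
    # vector, i.e. the overall histogram of Z plus 1.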
if dirichlet:
alpha_BGs = np.zeros((BINS.shape[0],BINS.shape[1]-1))
K = BINS.shape[1]-1
Cxk = np.zeros((N,K))
for b in range(BINS.shape[0]):
alpha_BGs[b] = np.histogram(np.ravel(Z),bins=BINS[b])[0]
for i in range(K-1):
Cxk[:,i]+=np.asarray((Z>=BINS[b,i])&(Z<BINS[b,i+1]),dtype=int)
Cxk[:,K-1]+=np.asarray((Z>=BINS[b,K-1])&(Z<=BINS[b,K]),dtype=int)
alpha_BG = np.mean(alpha_BGs,axis=0) + 1.0
Cxk /= float(BINS.shape[0])
else:
alpha_BG = np.histogram(np.ravel(Z),bins=BINS)[0] + 1.0
K = len(BINS)-1
Cxk = np.zeros((N,K))
for i in range(K-1):
Cxk[:,i]=np.asarray((Z>=BINS[i])&(Z<BINS[i+1]),dtype=int)
Cxk[:,K-1]=np.asarray((Z>=BINS[K-1])&(Z<=BINS[K]),dtype=int)
Cxk = Cxk.T
return Cxk, alpha_BG
############################ SCORE METHODS ############################
def calc_params(md, sigma):
Cxk_slice = Cxk
xb = np.arange(0,N,dtype='float')
wgts = np.exp(-(np.power((xb-md),2.)/(2.*np.power(sigma,2.))))
nk = np.sum(wgts*Cxk_slice,axis=1)
return Cxk_slice, xb, wgts, nk
def calc_full(n, alphas):
""" Calculate the log likelihood under DirMult distribution with alphas=avec, given data counts of nvec."""
lg_sum_alphas = math.lgamma(alphas.sum())
sum_lg_alphas = np.sum(scipy.special.gammaln(alphas))
lg_sum_alphas_n = math.lgamma(alphas.sum() + n.sum())
sum_lg_alphas_n = np.sum(scipy.special.gammaln(n+alphas))
return lg_sum_alphas - sum_lg_alphas - lg_sum_alphas_n + sum_lg_alphas_n
def score_wrapper(theta, args):
""" Calculate and return the score """
md,sigma = theta
alpha_SRC,alpha_BG = args
Cxk_slice, xb, wgts, nk = calc_params(md, sigma)
SRC_term = calc_full(nk, alpha_SRC)
BG_term = calc_full(nk, alpha_BG)
s1 = SRC_term - BG_term
nk_n = (alpha_BG/alpha_BG.sum()) * np.sum(nk)
SRC_term_n = calc_full(nk_n, alpha_SRC)
BG_term_n = calc_full(nk_n, alpha_BG)
s2 = SRC_term_n - BG_term_n
score = s1 - s2
return -score
############################ GRADIENT METHODS ############################
def calc_gradients(x,sigma,m,wx):
""" Calculate gradients for m and sigma for a given m, sigma, and window[xposl:xposr]
Returns two x-length vectors."""
grad_m = (wx*(x-m))/(np.power(sigma,2.))
grad_sigma = (wx*(np.power((m-x),2.)))/(np.power(sigma,3.))
return grad_m, grad_sigma
def calc_grad_weight(nks, alphaS, alphaB, N, AB, AS):
""" Calculate the weights for each bin k. Returns k-length vector."""
K = nks.size
w = sp.psi(nks + alphaS) - sp.psi(nks+alphaB) + sp.psi(N+AB) - sp.psi(N+AS)
return w
def calc_fullgrad(wgt,data,gradient):
""" Calculate full gradient: wgts * (data * grad) """
full = np.dot(wgt, (np.sum(data*gradient,axis=1) ))
return full
def gradient_wrapper(theta, args):
""" Calculate and return the gradient """
md,sigma = theta
alpha_SRC,alpha_BG = args
Cxk_slice, xb, wgts, nk = calc_params(md, sigma)
grad_m,grad_sigma = calc_gradients(xb,sigma,md,wgts)
w = calc_grad_weight(nk,alpha_SRC,alpha_BG,np.sum(nk),alpha_BG.sum(),alpha_SRC.sum())
gm = calc_fullgrad(w,Cxk_slice,grad_m)
gs = calc_fullgrad(w,Cxk_slice,grad_sigma)
nk_n = (alpha_BG/alpha_BG.sum()) * np.sum(nk)
wn = calc_grad_weight(nk_n,alpha_SRC,alpha_BG,np.sum(nk_n),alpha_BG.sum(),alpha_SRC.sum())
gmn = calc_fullgrad(wn,Cxk_slice,grad_m)
gsn = calc_fullgrad(wn,Cxk_slice,grad_sigma)
return [-(gm-gmn),-(gs-gsn)]
if __name__ == "__main__":
parser = optparse.OptionParser(usage="usage %prog [options]")
parser.add_option("-n","--numbins",type = "int",dest = "K",default=0,
help="number of bins (ignored if strategy is dexpocc or fromfile)")
parser.add_option("-b","--bins_fname",dest = "bfname",
help="bin borders filename")
parser.add_option("-s","--binning_strategy",dest = "strategy",
help="eqocc, width, expocc, dexpocc or fromfile. "
"MANDATORY OPTION.")
parser.add_option("-p","--prop",type="float",dest="prop",default=0.5,
help="proportion to decrease bin occupancy by (for use "
"with dexpocc; else ignored. DEFAULT VALUE = 0.5)")
parser.add_option("-d","--datafile",dest = "infile",
help="a list of numbers: 1D data to be read in (can't be "
"used with --rngseed)")
parser.add_option("-r","--rngseed",type = "int",dest = "seed",
help="an int to make random data up (can't be used with "
"--datafile)")
parser.add_option("-q","--hard",action="store_true",dest="hard",default=False,
help="make hard/rectangular windows (default = soft/squared"
" exponential)")
parser.add_option("-t","--dirichlet",action="store_true",dest="dirichlet",default=False,
help="make dirichlet bin borders (incompatible with \"from file\" binning stratgegy)")
parser.add_option("-o","--nohisto",action="store_true",dest="nohisto",default=False,
help="no histo in fig")
parser.add_option("-C","--CWT_fname",dest="CWT",
help="give CWT filename if background alphas from LDA "
"file to be used (can't be used with --local or --seed)\n")
opts, args = parser.parse_args()
EXIT = False
if opts.strategy is None:
print "ERROR: you must supply a binning strategy\n"
EXIT = True
if opts.infile and opts.seed:
print "ERROR: supply EITHER a datafile OR a random seed to make up data\n"
EXIT = True
if opts.seed and opts.CWT:
print "ERROR: background alphas from CWT can't be used with randomly generated data\n"
EXIT = True
if opts.dirichlet and opts.strategy=="fromfile":
print "ERROR: dirichlet bin borders are incompatible with using bin borders from file\n"
EXIT = True
if EXIT:
parser.print_help()
sys.exit(-1)
strategy = opts.strategy
outfile = 'DirModel_%s' % strategy
K = opts.K
if opts.seed:
seed = opts.seed
# make an "image"
rng.seed(seed) # seed the random number generator here
N = 500 #number of pixels in a fake test image
noise_size=1.0
x = np.arange(N)
# make up the 'shapes' of the sources
mid1, mid2, mid3 = (N-20)*rng.random()+10,(N-20)*rng.random()+10,(N-20)*rng.random()+10
print 'Random sources placed at ',mid1, mid2, mid3
spread1 = 8.*rng.random()+ 6. # length scale
shape1 = (4.5*rng.rand()+0.5)*np.exp(-0.5*np.power((x-mid1)*1.0/spread1,2.0))
shape2 = skew_gen_gauss(x,mid2)
shape3 = skew_gen_gauss(x,mid3)
# noise character of sources
variance = np.abs(noise_size*(1.0 - shape1 + shape2)) # source 3 has no variance effect
#variance = variance + x/float(len(x)) # to mimic steady change over large scales
noise = rng.normal(0,variance,x.shape)
# mean_intensity character of sources
mean = shape1 + shape2 + shape3
y = mean + noise
outfile += '_r%d_m%d-%d-%d' % (seed, int(mid1), int(mid2), int(mid3))
    else: # no random seed given, so read the data from the supplied file (a plain list of numbers)
infile = opts.infile
y = np.genfromtxt(infile)
x = np.arange(len(y))
N = len(y)
outfile += '_%s' % infile
#make bins (here, from the naked image)
if opts.dirichlet:
outfile += '_dirichletborders'
BINS = make_dirichlet_bins(y,K,strategy)
if K == 0:
K = BINS.shape[1] - 1
print 'Note: an example overall histogram: (using the first of the dirichlet histograms)'
print np.histogram(y,bins=BINS[0])[0]
else:
BINS = make_bin_borders(y,K,strategy,safety_gap=np.inf,fname=opts.bfname,prop=opts.prop)
if K == 0:
K = len(BINS) - 1
print 'Note: this makes the overall histogram this: (reality-check the final one especially)'
print np.histogram(y,bins=BINS)[0]
outfile += '_K%d' % K
#get background alphas from LDA output, if specified
if opts.CWT:
alpha_BG = get_BG(opts.CWT)
outfile += '_LDA'
else:
# bogus, but we're setting the background alphas as if there were
# no sources in the image at the moment....
Cxk,alpha_BG = make_alphaBG(BINS,N,y,opts.dirichlet)
alpha_SRC = 1.0 * np.ones(alpha_BG.shape)
max_spread = N
max_wd = max_spread/2
outfile += '_fullscore'
if opts.hard:
outfile += '_hardborders'
outfile += '_optima'
    #maximise the score by gradient-based optimisation (fmin_tnc minimises the negative score)
num_top = 3
num_iters = 50
messages = np.zeros(9)
top_scores=np.zeros((num_top,5))
# m bounds: 0,max_spread; sigma bounds: 0,max_wd
Bounds = [(0,max_spread),(0,max_wd)]
print "gradient descent ... "
#np.seterr(divide='raise')
for i in range(num_top):
optima = np.zeros((num_iters,5))
for j in range(num_iters):
print '------------------------------------------\niter %s.%s\n' % (i,j)
md = int(rng.rand()*max_spread)
sigma = int(rng.rand()*(max_wd/10.))
print '\nmd: %s, sigma: %s\n' % (md, sigma)
theta = [md,sigma]
args = [alpha_SRC, alpha_BG]
#sltn, its, rc = sop.fmin_tnc(score_wrapper, theta, gradient_wrapper, [args], bounds=Bounds, maxfun=1000, fmin=-1e10)
sltn, its, rc = sop.fmin_tnc(score_wrapper, theta, args=[args], approx_grad=True, bounds=Bounds, fmin=-1e10,accuracy=1e-16)
sc = score_wrapper(sltn, args)
optima[j,:2] = sltn
optima[j,2] = -sc
optima[j,3:] = gradient_wrapper(sltn,args)
messages[rc] += 1
top_opt = scipy.delete(optima, np.where(np.isnan(optima)), 0)
top_opt = top_opt[np.argsort(top_opt[:,2])][-1]
top_scores[i] = top_opt
#remove best source
top_md=top_opt[0]
top_sig=top_opt[1]
        y[int(top_md - top_sig):int(top_md + top_sig) + 1] = np.nan
Cxk,alpha_BGs = make_alphaBG(BINS,N,y,opts.dirichlet)
print '%s local minimum' % messages[0]
print '%s fconverged' % messages[1]
print '%s xconverged' % messages[2]
print '%s max functions reached' % messages[3]
print '%s linear search failed' % messages[4]
print '%s constant' % messages[5]
print '%s no progress' % messages[6]
print '%s user aborted' % messages[7]
print '%s infeasible' % messages[8]
for i in range(top_scores.shape[0]):
print top_scores[i]
np.savetxt(outfile,top_scores)
plt.clf()
#data
plt.plot(y,'k.')
plt.plot(shape1,'b-')
plt.plot(shape2,'b-')
plt.plot(shape3,'b-')
#optima
found1 = np.exp(-0.5*np.power((x-top_scores[0,0])*1.0/top_scores[0,1],2.0))
plt.plot(found1,'r-')
found2 = np.exp(-0.5*np.power((x-top_scores[1,0])*1.0/top_scores[1,1],2.0))
plt.plot(found2,'r-')
found3 = np.exp(-0.5*np.power((x-top_scores[2,0])*1.0/top_scores[2,1],2.0))
plt.plot(found3,'r-')
plt.savefig(outfile)
#print shapes
outfile += '_GT'
    with open(outfile, 'w') as out:
out.write('# %s %s \n' % (mid1, spread1))
np.savetxt(out, shape1)
out.write('# %s \n' % (mid2))
np.savetxt(out, shape2)
out.write('# %s \n' % (mid3))
np.savetxt(out, shape3)
|
gpl-2.0
|
betogulliver/JupyterWorkflow
|
jupyterworkflow/data.py
|
1
|
1112
|
import os
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import pandas as pd
FREEMONT_URL = "https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD"
def get_freemont_data(filename="Freemont.csv", url=FREEMONT_URL,
force_download=False) :
"""Download and cache the Freemont data
Parameters
----------
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force redownload of data
Returns
-------
data : pandas.DataFrame
The freemont bridge data
"""
if force_download or not os.path.exists(filename) :
urlretrieve(url, filename)
data = pd.read_csv("Freemont.csv", index_col="Date")#, parse_dates=True) # XXX: sloooooow
try:
data.index = pd.to_datetime(data.index, format="%m/%d/%Y %I:%M:%S %p") # XXX: fast
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', "East"]
data['Total'] = data["West"] + data["East"]
return data
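# Minimal usage sketch (an editorial addition, not part of the original module):
# the first call downloads the CSV next to the script, later calls reuse the
# cached copy. Assumes pandas and network access are available.
if __name__ == "__main__":
    counts = get_freemont_data()
    print(counts.head())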
|
mit
|