repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
dssg/wikienergy | disaggregator/build/pandas/pandas/stats/var.py | 16 | 16319 | from __future__ import division
from pandas.compat import range, lrange, zip, reduce
from pandas import compat
import numpy as np
from pandas.core.base import StringMixin
from pandas.util.decorators import cache_readonly
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.core.series import Series
import pandas.stats.common as common
from pandas.stats.math import inv
from pandas.stats.ols import _combine_rhs
class VAR(StringMixin):
"""
Estimates VAR(p) regression on multivariate time series data
presented in pandas data structures.
Parameters
----------
data : DataFrame or dict of Series
p : int, number of lags to include
intercept : bool, default True, whether to include an intercept term
"""
def __init__(self, data, p=1, intercept=True):
try:
import statsmodels.tsa.vector_ar.api as sm_var
except ImportError:
import scikits.statsmodels.tsa.var as sm_var
self._data = DataFrame(_combine_rhs(data))
self._p = p
self._columns = self._data.columns
self._index = self._data.index
self._intercept = intercept
@cache_readonly
def aic(self):
"""Returns the Akaike information criterion."""
return self._ic['aic']
@cache_readonly
def bic(self):
"""Returns the Bayesian information criterion."""
return self._ic['bic']
@cache_readonly
def beta(self):
"""
Returns a DataFrame, where each column x1 contains the betas
calculated by regressing the x1 column of the VAR input with
the lagged input.
Returns
-------
DataFrame
"""
d = dict([(key, value.beta)
for (key, value) in compat.iteritems(self.ols_results)])
return DataFrame(d)
def forecast(self, h):
"""
Returns a DataFrame containing the forecasts for 1, 2, ..., h time
steps. Each column x1 contains the forecasts of the x1 column.
Parameters
----------
h : int
Number of time steps ahead to forecast.
Returns
-------
DataFrame
"""
forecast = self._forecast_raw(h)[:, 0, :]
return DataFrame(forecast, index=lrange(1, 1 + h),
columns=self._columns)
def forecast_cov(self, h):
"""
Returns the covariance of the forecast residuals.
Returns
-------
DataFrame
"""
return [DataFrame(value, index=self._columns, columns=self._columns)
for value in self._forecast_cov_raw(h)]
def forecast_std_err(self, h):
"""
Returns the standard errors of the forecast residuals.
Returns
-------
DataFrame
"""
return DataFrame(self._forecast_std_err_raw(h),
index=lrange(1, 1 + h), columns=self._columns)
@cache_readonly
def granger_causality(self):
"""Returns the f-stats and p-values from the Granger Causality Test.
If the data consists of columns x1, x2, x3, then we perform the
following regressions:
x1 ~ L(x2, x3)
x1 ~ L(x1, x3)
x1 ~ L(x1, x2)
The f-stats of these results are placed in the 'x1' column of the
returned DataFrame. We then repeat for x2, x3.
Returns
-------
Dict, where 'f-stat' returns the DataFrame containing the f-stats,
and 'p-value' returns the DataFrame containing the corresponding
p-values of the f-stats.
"""
from pandas.stats.api import ols
from scipy.stats import f
d = {}
for col in self._columns:
d[col] = {}
for i in range(1, 1 + self._p):
lagged_data = self._lagged_data[i].filter(
self._columns - [col])
for key, value in compat.iteritems(lagged_data):
d[col][_make_param_name(i, key)] = value
f_stat_dict = {}
p_value_dict = {}
for col, y in compat.iteritems(self._data):
ssr_full = (self.resid[col] ** 2).sum()
f_stats = []
p_values = []
for col2 in self._columns:
result = ols(y=y, x=d[col2])
resid = result.resid
ssr_reduced = (resid ** 2).sum()
M = self._p
N = self._nobs
K = self._k * self._p + 1
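# Granger F-test: compare the restricted equation (the candidate variable's
# own lags excluded) with the full equation,
# F = ((ssr_reduced - ssr_full) / M) / (ssr_full / (N - K)),
# where M is the number of restrictions (one per excluded lag) and N - K the
# residual degrees of freedom.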
f_stat = ((ssr_reduced - ssr_full) / M) / (ssr_full / (N - K))
f_stats.append(f_stat)
p_value = f.sf(f_stat, M, N - K)
p_values.append(p_value)
f_stat_dict[col] = Series(f_stats, self._columns)
p_value_dict[col] = Series(p_values, self._columns)
f_stat_mat = DataFrame(f_stat_dict)
p_value_mat = DataFrame(p_value_dict)
return {
'f-stat': f_stat_mat,
'p-value': p_value_mat,
}
@cache_readonly
def ols_results(self):
"""
Returns the results of the regressions:
x_1 ~ L(X)
x_2 ~ L(X)
...
x_k ~ L(X)
where X = [x_1, x_2, ..., x_k]
and L(X) represents the columns of X lagged 1, 2, ..., n lags
(n is the user-provided number of lags).
Returns
-------
dict
"""
from pandas.stats.api import ols
d = {}
for i in range(1, 1 + self._p):
for col, series in compat.iteritems(self._lagged_data[i]):
d[_make_param_name(i, col)] = series
result = dict([(col, ols(y=y, x=d, intercept=self._intercept))
for col, y in compat.iteritems(self._data)])
return result
@cache_readonly
def resid(self):
"""
Returns the DataFrame containing the residuals of the VAR regressions.
Each column x1 contains the residuals generated by regressing the x1
column of the input against the lagged input.
Returns
-------
DataFrame
"""
d = dict([(col, series.resid)
for (col, series) in compat.iteritems(self.ols_results)])
return DataFrame(d, index=self._index)
@cache_readonly
def summary(self):
template = """
%(banner_top)s
Number of Observations: %(nobs)d
AIC: %(aic).3f
BIC: %(bic).3f
%(banner_coef)s
%(coef_table)s
%(banner_end)s
"""
params = {
'banner_top': common.banner('Summary of VAR'),
'banner_coef': common.banner('Summary of Estimated Coefficients'),
'banner_end': common.banner('End of Summary'),
'coef_table': self.beta,
'aic': self.aic,
'bic': self.bic,
'nobs': self._nobs,
}
return template % params
@cache_readonly
def _alpha(self):
"""
Returns array where the i-th element contains the intercept
when regressing the i-th column of self._data with the lagged data.
"""
if self._intercept:
return self._beta_raw[-1]
else:
return np.zeros(self._k)
@cache_readonly
def _beta_raw(self):
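"""Stacked coefficient matrix with one column per equation (per input
column); rows follow the fitted parameter order, and ``_alpha`` assumes
the intercept row, when present, comes last."""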
return np.array([list(self.beta[col].values) for col in self._columns]).T
def _trans_B(self, h):
"""
Returns 0, 1, ..., (h-1)-th power of transpose of B as defined in
equation (4) on p. 142 of the Stata 11 Time Series reference book.
"""
result = [np.eye(1 + self._k * self._p)]
row1 = np.zeros((1, 1 + self._k * self._p))
row1[0, 0] = 1
v = self._alpha.reshape((self._k, 1))
row2 = np.hstack(tuple([v] + self._lag_betas))
m = self._k * (self._p - 1)
row3 = np.hstack((
np.zeros((m, 1)),
np.eye(m),
np.zeros((m, self._k))
))
trans_B = np.vstack((row1, row2, row3)).T
result.append(trans_B)
for i in range(2, h):
result.append(np.dot(trans_B, result[i - 1]))
return result
@cache_readonly
def _x(self):
values = np.array([
list(self._lagged_data[i][col].values)
for i in range(1, 1 + self._p)
for col in self._columns
]).T
x = np.hstack((np.ones((len(values), 1)), values))[self._p:]
return x
@cache_readonly
def _cov_beta(self):
cov_resid = self._sigma
x = self._x
inv_cov_x = inv(np.dot(x.T, x))
return np.kron(inv_cov_x, cov_resid)
def _data_xs(self, i):
"""
Returns the cross-section of the data at the given timestep.
"""
return self._data.values[i]
def _forecast_cov_raw(self, n):
resid = self._forecast_cov_resid_raw(n)
# beta = self._forecast_cov_beta_raw(n)
# return [a + b for a, b in zip(resid, beta)]
# TODO: ignore the beta forecast std err until it's verified
return resid
def _forecast_cov_beta_raw(self, n):
"""
Returns the covariance of the beta errors for the forecast at
1, 2, ..., n timesteps.
"""
p = self._p
values = self._data.values
T = len(values) - self._p - 1
results = []
for h in range(1, n + 1):
psi = self._psi(h)
trans_B = self._trans_B(h)
sum = 0
cov_beta = self._cov_beta
for t in range(T + 1):
index = t + p
y = values.take(lrange(index, index - p, -1), axis=0).ravel()
trans_Z = np.hstack(([1], y))
trans_Z = trans_Z.reshape(1, len(trans_Z))
sum2 = 0
for i in range(h):
ZB = np.dot(trans_Z, trans_B[h - 1 - i])
prod = np.kron(ZB, psi[i])
sum2 = sum2 + prod
sum = sum + chain_dot(sum2, cov_beta, sum2.T)
results.append(sum / (T + 1))
return results
def _forecast_cov_resid_raw(self, h):
"""
Returns the covariance of the residual errors for the forecast at
1, 2, ..., h timesteps.
"""
psi_values = self._psi(h)
sum = 0
result = []
for i in range(h):
psi = psi_values[i]
sum = sum + chain_dot(psi, self._sigma, psi.T)
result.append(sum)
return result
def _forecast_raw(self, h):
"""
Returns the forecast at 1, 2, ..., h timesteps in the future.
"""
k = self._k
result = []
for i in range(h):
sum = self._alpha.reshape(1, k)
for j in range(self._p):
beta = self._lag_betas[j]
idx = i - j
if idx > 0:
y = result[idx - 1]
else:
y = self._data_xs(idx - 1)
sum = sum + np.dot(beta, y.T).T
result.append(sum)
return np.array(result)
def _forecast_std_err_raw(self, h):
"""
Returns the standard error of the forecasts
at 1, 2, ..., n timesteps.
"""
return np.array([np.sqrt(np.diag(value))
for value in self._forecast_cov_raw(h)])
@cache_readonly
def _ic(self):
"""
Returns the Akaike/Bayesian information criteria.
"""
RSS = self._rss
k = self._p * (self._k * self._p + 1)
n = self._nobs * self._k
return {'aic': 2 * k + n * np.log(RSS / n),
'bic': n * np.log(RSS / n) + k * np.log(n)}
@cache_readonly
def _k(self):
return len(self._columns)
@cache_readonly
def _lag_betas(self):
"""
Returns list of B_i, where B_i represents the (k, k) matrix
with the j-th row containing the betas of regressing the j-th
column of self._data with self._data lagged i time steps.
First element is B_1, second element is B_2, etc.
"""
k = self._k
b = self._beta_raw
return [b[k * i: k * (i + 1)].T for i in range(self._p)]
@cache_readonly
def _lagged_data(self):
return dict([(i, self._data.shift(i))
for i in range(1, 1 + self._p)])
@cache_readonly
def _nobs(self):
return len(self._data) - self._p
def _psi(self, h):
"""
psi value used for calculating standard error.
Returns [psi_0, psi_1, ..., psi_(h - 1)]
"""
k = self._k
result = [np.eye(k)]
for i in range(1, h):
result.append(sum(
[np.dot(result[i - j], self._lag_betas[j - 1])
for j in range(1, 1 + i)
if j <= self._p]))
return result
@cache_readonly
def _resid_raw(self):
resid = np.array([self.ols_results[col]._resid_raw
for col in self._columns])
return resid
@cache_readonly
def _rss(self):
"""Returns the sum of the squares of the residuals."""
return (self._resid_raw ** 2).sum()
@cache_readonly
def _sigma(self):
"""Returns covariance of resids."""
k = self._k
n = self._nobs
resid = self._resid_raw
return np.dot(resid, resid.T) / (n - k)
def __unicode__(self):
return self.summary
def lag_select(data, max_lags=5, ic=None):
"""
Select number of lags based on a variety of information criteria
Parameters
----------
data : DataFrame-like
max_lags : int
Maximum number of lags to evaluate
ic : {None, 'aic', 'bic', ...}
Choosing None will just display the results
Returns
-------
None
"""
pass
class PanelVAR(VAR):
"""
Performs Vector Autoregression on panel data.
Parameters
----------
data: Panel or dict of DataFrame
lags: int
"""
def __init__(self, data, lags, intercept=True):
self._data = _prep_panel_data(data)
self._p = lags
self._intercept = intercept
self._columns = self._data.items
@cache_readonly
def _nobs(self):
"""Returns the number of observations."""
_, timesteps, entities = self._data.values.shape
return (timesteps - self._p) * entities
@cache_readonly
def _rss(self):
"""Returns the sum of the squares of the residuals."""
return (self.resid.values ** 2).sum()
def forecast(self, h):
"""
Returns the forecasts at 1, 2, ..., n timesteps in the future.
"""
forecast = self._forecast_raw(h).T.swapaxes(1, 2)
index = lrange(1, 1 + h)
w = Panel(forecast, items=self._data.items, major_axis=index,
minor_axis=self._data.minor_axis)
return w
@cache_readonly
def resid(self):
"""
Returns the DataFrame containing the residuals of the VAR regressions.
Each column x1 contains the residuals generated by regressing the x1
column of the input against the lagged input.
Returns
-------
DataFrame
"""
d = dict([(key, value.resid)
for (key, value) in compat.iteritems(self.ols_results)])
return Panel.fromDict(d)
def _data_xs(self, i):
return self._data.values[:, i, :].T
@cache_readonly
def _sigma(self):
"""Returns covariance of resids."""
k = self._k
resid = _drop_incomplete_rows(self.resid.toLong().values)
n = len(resid)
return np.dot(resid.T, resid) / (n - k)
def _prep_panel_data(data):
"""Converts the given data into a Panel."""
if isinstance(data, Panel):
return data
return Panel.fromDict(data)
def _drop_incomplete_rows(array):
mask = np.isfinite(array).all(1)
indices = np.arange(len(array))[mask]
return array.take(indices, 0)
def _make_param_name(lag, name):
return 'L%d.%s' % (lag, name)
def chain_dot(*matrices):
"""
Returns the dot product of the given matrices.
Parameters
----------
matrices: argument list of ndarray
"""
return reduce(lambda x, y: np.dot(y, x), matrices[::-1])
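# ---------------------------------------------------------------------------
# Illustrative usage sketches (not part of the original module). They assume a
# pandas version that still ships ``pandas.stats`` plus an importable
# statsmodels, since ``VAR.__init__`` pulls in statsmodels' VAR machinery;
# treat them as examples rather than tested code.
def _demo_var_usage(n_obs=100, seed=0):
    rng = np.random.RandomState(seed)
    data = DataFrame({'x1': rng.randn(n_obs).cumsum(),
                      'x2': rng.randn(n_obs).cumsum()})
    model = VAR(data, p=2)
    # Lag coefficients per equation and a 5-step-ahead forecast.
    return model.beta, model.forecast(5)


def _demo_chain_dot():
    # chain_dot multiplies left-to-right: chain_dot(a, b, c) == a.dot(b).dot(c).
    a, b, c = np.eye(2), 2 * np.eye(2), np.arange(4.0).reshape(2, 2)
    assert np.allclose(chain_dot(a, b, c), a.dot(b).dot(c))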
| mit |
arbenson/tensor-sc | scripts/create_mat_file.py | 2 | 1701 |
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.io as sio
import sys
'''
Create a .mat file of all the data so that we can plot things in Matlab.
'''
def get_best(num_cut_sweep, suffix):
best = {}
total = len(num_cut_sweep)
for i, num_cut in enumerate(num_cut_sweep):
min_size = min(i + 1, total - (i + 1))
if suffix == 'density' and num_cut > 1.0:
val = 0.0
else:
val = num_cut
if min_size in best:
if suffix == 'density':
best[min_size] = max(val, best[min_size])
else:
best[min_size] = min(val, best[min_size])
else:
best[min_size] = val
return zip(*sorted(best.items()))
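# Illustrative sketch (not part of the original script): get_best collapses a
# sweep of per-cut values into the best value seen for each minimum-side size.
# The numbers below are made up purely for demonstration.
def _demo_get_best():
    demo_sweep = [4.0, 2.0, 3.0, 1.0]
    sizes, vals = get_best(demo_sweep, 'num_cut')
    # For 'num_cut' the minimum per size bucket is kept; 'density' keeps the max.
    return list(sizes), list(vals)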
all_data = {}
def gather_data(data, suffix):
for cut_type in ['msc', 'dl', 'lap', 'alap', 'cocluster_u', 'cocluster_v', 'random']:
key = '%s-filter_%s_%s' % (data, cut_type, suffix)
with open(key + '.txt') as f:
num_cut_sweep = [float(line) for line in f]
# Format for Matlab
key = key.split('/')[-1]
key = key.replace('-', '_')
all_data[key] = get_best(num_cut_sweep, suffix)
for data_set in ['soc-Slashdot0811', 'wiki-Vote', 'as-caida20071105',
'email-Enron', 'soc-Epinions1', 'amazon0312',
'twitter_combined', 'email-EuAll', 'cit-HepPh',
'web-Stanford', 'wiki-RfA-net', 'wiki-Talk']:
gather_data('d3c_cond_results/' + data_set, 'd3c_cond')
gather_data('num_cut_results/' + data_set, 'num_cut')
gather_data('density_results/' + data_set, 'density')
sio.savemat('cut_data.mat', all_data)
| bsd-2-clause |
deepesch/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 22 | 16769 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LogisticRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
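# Illustrative sketch (not part of the test suite): the basic fit/predict
# pattern exercised above, run on the toy data defined at the top of the file.
def _demo_adaboost_toy():
    clf = AdaBoostClassifier(algorithm='SAMME.R', random_state=0)
    clf.fit(X, y_class)
    return clf.predict(T), clf.predict_proba(T)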
| bsd-3-clause |
imaculate/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, is
derived for large samples (asymptotic results) and assumes the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
maxentile/advanced-ml-project | scripts/comparing_embeddings.py | 1 | 4645 | import pandas as pd
import numpy as np
import pylab as pl
import seaborn as sns
from sklearn import neighbors
from scipy.cluster import hierarchy
from scipy.spatial import distance
from scipy.spatial.distance import squareform,pdist
from scipy.stats import spearmanr
def one_nn_class_baseline(X,labels):
''' given a pointcloud X and class labels, compute the leave-one-out
classification accuracy of a 1-NN classifier
'''
one_nn = neighbors.kneighbors_graph(X,2)
inds = np.zeros(len(X),dtype=int)
for i in range(len(X)):
inds[i] = [ind for ind in one_nn[i].indices if ind != i][0]
labels = np.asarray(labels)
preds = labels[inds]
return 1.0 * np.sum(preds == labels) / len(labels)
def one_nn_baseline(X,Y):
''' given two clouds of corresponding points, X and Y, report the fraction
of nearest-neighbors preserved.
Algorithm:
- each pair of corresponding points is labeled by an index i
- for each point in X, find its nearest neighbor, labeled j
- for the corresponding point in Y, find its nearest neighbor, labeled j'
- if j==j', then the nearest neighbors of i are preserved
- return number of preserved neighbors / the total number possible'''
# 2, since self is counted as a neighbor by the neighbors module
one_nn_X = neighbors.kneighbors_graph(X,2)
one_nn_Y = neighbors.kneighbors_graph(Y,2)
sames = 0
for i in range(len(X)):
neighbor_X = one_nn_X[i].indices[one_nn_X[i].indices!=i][0]
neighbor_Y = one_nn_Y[i].indices[one_nn_Y[i].indices!=i][0]
if neighbor_X == neighbor_Y:
sames+=1
same_frac = 1.0*sames / len(X)
return same_frac
def knn_baseline(X,Y,k=5):
''' generalization of the one_nn_baseline algorithm...
given two clouds of corresponding points, X and Y, and a parameter k,
compute the fraction of each k-nearest-neighborhood conserved
return the overall fraction of neighborhoods preserved, as well as the
fraction of each local neighborhood preserved
'''
k = k+1 # since self is counted as a neighbor in the kneighbors graph
knn_X = neighbors.kneighbors_graph(X,k)
knn_Y = neighbors.kneighbors_graph(Y,k)
sames = np.zeros(len(X))
for i in range(len(X)):
neighbors_X = set(knn_X[i].indices[knn_X[i].indices!=i])
neighbors_Y = set(knn_Y[i].indices[knn_Y[i].indices!=i])
sames[i] = len(neighbors_X.intersection(neighbors_Y))
same_frac = 1.0*sum(sames) / (len(X)*(k-1))
return same_frac, sames
def knn_baseline_curve(X,Y,ks=range(1,50)):
''' slightly less wasteful way to sweep over a range of ks
when computing knn_baseline, if computing the neighbors graph is expensive'''
max_k = max(ks)+1 # since self is counted as a neighbor in the kneighbors graph
knn_X = neighbors.kneighbors_graph(X,max_k)
knn_Y = neighbors.kneighbors_graph(Y,max_k)
sames = np.zeros(len(ks))
for ind,k in enumerate(ks):
for i in range(len(X)):
neighbors_X = set(knn_X[i].indices[knn_X[i].indices!=i][:k])
neighbors_Y = set(knn_Y[i].indices[knn_Y[i].indices!=i][:k])
sames[ind] += len(neighbors_X.intersection(neighbors_Y))
sames[ind] /= (len(X)*(k))
return sames
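# Illustrative sketch (not in the original script): measure how well a 2-D PCA
# embedding of a random high-dimensional cloud preserves k-nearest-neighborhoods.
# sklearn.decomposition.PCA is an assumed dependency beyond the imports above.
def _demo_knn_preservation(n_points=200, n_dims=10, k=5, seed=0):
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(seed)
    X_high = rng.randn(n_points, n_dims)
    X_low = PCA(n_components=2).fit_transform(X_high)
    frac, _ = knn_baseline(X_high, X_low, k=k)
    return frac  # fraction of k-neighborhoods shared between X_high and X_low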
def plot_1nn_classification_comparison():
fig, ax = pl.subplots()
barlist = ax.bar(range(len(vec)),vec)
pl.hlines(one_nn_class_baseline(X,Y),0,len(vec),linestyles='--')
pl.xlabel('Algorithm')
pl.ylabel('1NN Classification Accuracy')
pl.title('1NN Classification in Low-Dimensional Embeddings')
baseline_names = ['PCA','Isomap','LLE']
pl.xticks(range(len(vec)), baseline_names + method_names,rotation=30)
#pl.ylim(0.25,1.0)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., height-0.075, '{0:.2f}'.format(height),
ha='center', va='bottom',color='white')
autolabel(barlist)
for i in range(len(baseline_names)):
barlist[i].set_color('gray')
for i in range(len(baseline_names),len(vec)):
barlist[i].set_color('blue')
pl.savefig('../figures/embedding-comparison.pdf')
#def plot_neighborhood_preservation()
# metrics of cluster preservation
linkages = ['single','complete','ward','average','weighted','centroid','median']
def pairwise_cophenetic_distances(X,linkage='single'):
return hierarchy.cophenet(hierarchy.linkage(X,linkage))
def cophenetic_distance_preservation(orig,embedding,linkage='single'):
orig_d = pairwise_cophenetic_distances(orig,linkage)
embed_d = pairwise_cophenetic_distances(embedding,linkage)
return spearmanr(orig_d,embed_d)
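# Illustrative sketch (not in the original script): Spearman rank correlation of
# cophenetic distances before and after a PCA embedding of random data.
def _demo_cophenetic_preservation(seed=0):
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(seed)
    X_high = rng.randn(50, 8)
    X_low = PCA(n_components=2).fit_transform(X_high)
    return cophenetic_distance_preservation(X_high, X_low, linkage='average')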
| mit |
treycausey/scikit-learn | sklearn/preprocessing/label.py | 3 | 13306 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils import deprecated, column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
`classes_` : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
Attributes
----------
`classes_` : array of shape [n_class]
Holds the label for each class.
`multilabel_` : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.multilabel_
False
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
>>> lb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> lb.classes_
array([1, 2, 3])
>>> lb.multilabel_
True
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1):
if neg_label >= pos_label:
raise ValueError("neg_label must be strictly less than pos_label.")
self.neg_label = neg_label
self.pos_label = pos_label
@property
@deprecated("Attribute `multilabel` was renamed to `multilabel_` in "
"0.14 and will be removed in 0.16")
def multilabel(self):
return self.multilabel_
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
self : returns an instance of self.
"""
y_type = type_of_target(y)
self.multilabel_ = y_type.startswith('multilabel')
if self.multilabel_:
self.indicator_matrix_ = y_type == 'multilabel-indicator'
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.multilabel_:
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
multilabel=self.multilabel_,
pos_label=self.pos_label,
neg_label=self.neg_label)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array of shape [n_samples, n_classes]
Target values.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this makes it possible to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
half = (self.pos_label - self.neg_label) / 2.0
threshold = self.neg_label + half
if self.multilabel_:
Y = np.array(Y > threshold, dtype=int)
# Return the predictions in the same format as in fit
if self.indicator_matrix_:
# Label indicator matrix format
return Y
else:
# Lists of tuples format
return [tuple(self.classes_[np.flatnonzero(Y[i])])
for i in range(Y.shape[0])]
if len(Y.shape) == 1 or Y.shape[1] == 1:
y = np.array(Y.ravel() > threshold, dtype=int)
else:
y = Y.argmax(axis=1)
return self.classes_[y]
def label_binarize(y, classes, multilabel=False, neg_label=0, pos_label=1):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
multilabel : boolean
Set to true if y is encoding a multilabel task (with a variable
number of label assignments per sample) rather than a multiclass task
where one sample has one and only one label assigned.
neg_label: int (default: 0)
Value with which negative labels must be encoded.
pos_label: int (default: 1)
Value with which positive labels must be encoded.
Returns
-------
Y : numpy array of shape [n_samples, n_classes]
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
>>> label_binarize([(1, 2), (6,), ()], multilabel=True,
... classes=[1, 6, 4, 2])
array([[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 0]])
See also
--------
LabelBinarizer : estimator that learns the classes during ``fit`` and then
performs the same transformation via its ``transform`` method.
"""
y_type = type_of_target(y)
if multilabel or len(classes) > 2:
Y = np.zeros((len(y), len(classes)), dtype=np.int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
if multilabel:
if y_type == "multilabel-indicator":
Y[y == 1] = pos_label
return Y
elif y_type == "multilabel-sequences":
# inverse map: label => column index
imap = dict((v, k) for k, v in enumerate(classes))
for i, label_tuple in enumerate(y):
for label in label_tuple:
Y[i, imap[label]] = pos_label
return Y
else:
raise ValueError("y should be in a multilabel format, "
"got %r" % (y,))
else:
y = column_or_1d(y)
if len(classes) == 2:
Y[y == classes[1], 0] = pos_label
return Y
elif len(classes) >= 2:
for i, k in enumerate(classes):
Y[y == k, i] = pos_label
return Y
else:
# Only one class, returns a matrix with all negative labels.
return Y
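# Illustrative sketch (not part of this module): how inverse_transform maps
# decision_function-style scores back to class labels; the scores are made up.
def _demo_inverse_transform_threshold():
    lb = LabelBinarizer(neg_label=0, pos_label=1)
    lb.fit([1, 2, 6, 4, 2])                    # classes_ becomes [1, 2, 4, 6]
    scores = np.array([[-0.3, 0.1, 2.5, 0.4],
                       [1.7, -0.2, 0.0, -1.1]])
    # With more than two classes the argmax rule decides; threshold=0 matches
    # the documented choice for decision_function output.
    return lb.inverse_transform(scores, threshold=0)   # -> array([4, 1])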
| bsd-3-clause |
macks22/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
dgwakeman/mne-python | mne/tests/test_report.py | 3 | 8942 | # Authors: Mainak Jas <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
import warnings
import shutil
from nose.tools import assert_true, assert_equal, assert_raises
from mne import Epochs, read_events, pick_types, read_evokeds
from mne.io import Raw
from mne.datasets import testing
from mne.report import Report
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
requires_PIL, run_tests_if_main, slow_test)
from mne.viz import plot_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
# Set our plotters to test mode
warnings.simplefilter('always') # enable b/c these tests throw warnings
@slow_test
@testing.requires_testing_data
@requires_PIL
def test_render_report():
"""Test rendering -*.fif files for mne report.
"""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = Raw(raw_fname_new)
picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
epochs.save(epochs_fname)
epochs.average().save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, on_error='raise')
assert_true(len(w) >= 1)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving functionality
report.data_path = tempdir
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, pattern=pattern)
assert_true(len(w) >= 1)
fnames = glob.glob(op.join(tempdir, '*.raw')) + \
glob.glob(op.join(tempdir, '*.raw'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
@testing.requires_testing_data
@requires_mayavi
@requires_PIL
def test_render_add_sections():
"""Test adding figures/images to section.
"""
tempdir = _TempDir()
import matplotlib.pyplot as plt
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
assert_raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_trans(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
@slow_test
@testing.requires_testing_data
@requires_mayavi
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report.
"""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
n_jobs=2)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report.
"""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(tempdir)
assert_true(len(w) >= 1)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report.
"""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert_equal(html, html_compare)
def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
assert_raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
assert_raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
run_tests_if_main()
| bsd-3-clause |
ilo10/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e412.py | 2 | 6395 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer']
# 'hair straighteners',
# 'television',
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
max_diff = 100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=1,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
standardise_targets=True,
unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-3,
learning_rate_changes_by_iteration={
1000: 1e-4,
4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': RecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.5),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
xyguo/scikit-learn | sklearn/neighbors/tests/test_kde.py | 80 | 5560 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
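            # Added note (rough arithmetic, not from the original test): for a
            # one-dimensional standard normal, 2 * scipy.stats.norm.sf(5) is
            # about 5.7e-7, so even with 100 draws the chance of any sample
            # landing more than 5 bandwidths from its nearest training point
            # stays tiny -- hence the "very small chance" caveat above.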
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
chuajiesheng/twitter-sentiment-analysis | document_clustering/cluster.py | 1 | 3790 | # coding=utf-8
# OS-level import
import sys
import os
import code
# Data related import
import numpy as np
import pandas as pd
import nltk
import re
import codecs
from sklearn import feature_extraction
import mpld3
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from nltk.tokenize import TweetTokenizer
# Project related object
from utils import Reader
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
tknzr = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
def tokenize_and_stem(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
def tokenize_only(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in tknzr.tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
return filtered_tokens
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding('utf-8')
working_directory = os.getcwd()
files = Reader.read_directory(working_directory)
print '{} files available'.format(len(files))
# TODO: remove me
files = files[:800]
all_tweets = []
totalvocab_stemmed = []
totalvocab_tokenized = []
for f in files:
tweets = Reader.read_file(f)
selected_tweets = filter(lambda t: t.is_post() and t.language() == 'en', tweets)
        texts = map(lambda t: t.body(), selected_tweets)
        all_tweets.extend(texts)  # collect tweet bodies so the TF-IDF step below has data
for i in texts:
allwords_stemmed = tokenize_and_stem(i) # for each item in 'all_tweets', tokenize/stem
totalvocab_stemmed.extend(allwords_stemmed) # extend the 'totalvocab_stemmed' list
allwords_tokenized = tokenize_only(i)
totalvocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed)
print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'
tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000,
min_df=0.05, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 3))
tfidf_matrix = tfidf_vectorizer.fit_transform(all_tweets) # fit the vectorizer to synopses
print 'td-idf matrix: {}'.format(tfidf_matrix.shape)
terms = tfidf_vectorizer.get_feature_names()
dist = 1 - cosine_similarity(tfidf_matrix)
num_clusters = 10
km = KMeans(n_clusters=num_clusters, verbose=0)
# code.interact(local=dict(globals(), **locals()))
km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(num_clusters):
print 'Cluster {} words: '.format(str(i)),
for ind in order_centroids[i, :6]: # replace 6 with n words per cluster
print '{}'.format(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0].encode('utf-8', 'ignore')),
print ''
| apache-2.0 |
dongjoon-hyun/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 43 | 3449 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/compute_compare/compare_comptes_des_transports_vehicules.py | 4 | 2856 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 17:34:01 2015
@author: thomas.douenne
"""
# The goal of this script is to describe the difference between the size and composition of the vehicle fleet
# as reported in the Comptes du Transport data and in the Budget des Familles surveys.
# Import general modules
from __future__ import division
import seaborn
# Import Comptes des Transports data and Openfisca-specific modules
from ipp_macro_series_parser.agregats_transports.transports_cleaner import g2_1
from openfisca_france_indirect_taxation.examples.utils_example import get_input_data_frame, graph_builder_line
# Import a new color palette
seaborn.set_palette(seaborn.color_palette("Set2", 12))
# Select the Comptes des Transports data to use and organise the dataframe
comparaison_vehicules = g2_1[g2_1['categorie'] == u'Voitures particulières']
del comparaison_vehicules['categorie']
comparaison_vehicules = comparaison_vehicules.set_index('index')
comparaison_vehicules = comparaison_vehicules.transpose()
comparaison_vehicules.rename(columns = {'Total': 'total agregats', 'dont essence': 'essence agregats',
'dont Diesel': 'diesel agregats'}, inplace = True)
comparaison_vehicules['diesel bdf'] = 0
comparaison_vehicules['essence bdf'] = 0
comparaison_vehicules['total bdf'] = 0
# Select the Budget des Familles data to use and organise the dataframe
for year in [2000, 2005, 2011]:
aggregates_data_frame = get_input_data_frame(year)
df_nombre_vehicules_bdf = aggregates_data_frame[['veh_diesel'] + ['veh_essence'] + ['pondmen']]
nombre_vehicules_diesel_bdf = (
df_nombre_vehicules_bdf['veh_diesel'] * df_nombre_vehicules_bdf['pondmen']
).sum() / 1000
comparaison_vehicules.loc[comparaison_vehicules.index == year, 'diesel bdf'] = \
nombre_vehicules_diesel_bdf
nombre_vehicules_essence_bdf = (
df_nombre_vehicules_bdf['veh_essence'] * df_nombre_vehicules_bdf['pondmen']
).sum() / 1000
comparaison_vehicules.loc[comparaison_vehicules.index == year, 'essence bdf'] = \
nombre_vehicules_essence_bdf
nombre_vehicules_total_bdf = (
(df_nombre_vehicules_bdf['veh_essence'] + df_nombre_vehicules_bdf['veh_diesel']) *
df_nombre_vehicules_bdf['pondmen']
).sum() / 1000
comparaison_vehicules.loc[comparaison_vehicules.index == year, 'total bdf'] = \
nombre_vehicules_total_bdf
comparaison_vehicules = comparaison_vehicules[comparaison_vehicules['total bdf'] != 0]
# Produce the graphs
graph_builder_line(comparaison_vehicules[['total bdf'] + ['total agregats']])
graph_builder_line(comparaison_vehicules[['diesel bdf'] + ['diesel agregats']])
graph_builder_line(comparaison_vehicules[['essence bdf'] + ['essence agregats']])
| agpl-3.0 |
iandriver/RNA-sequence-tools | RNA_Seq_analysis/make_monocle_data_pdgfra.py | 2 | 6290 | import os
import cPickle as pickle
import pandas as pd
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from matplotlib.ticker import LinearLocator
import seaborn as sns
import numpy as np
from operator import itemgetter
#the file path where gene list will be and where new list will output
path_to_file = '/Volumes/Seq_data/cuffnorm_pdgfra_1_and_2'
#name of file containing gene
gene_file_source = 'go_search_genes_lung_all.txt'
base_name = 'pdgfra2_all_n2'
#load file gene
by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file, base_name+'_outlier_filtered.txt'), sep='\t')
by_gene = by_cell.transpose()
#create list of genes
gene_list = by_cell.index.tolist()
#create cell list
cell_list = [x for x in list(by_cell.columns.values)]
df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list, index=cell_list)
df_by_cell1 = pd.DataFrame(by_cell, columns=cell_list, index=gene_list)
def make_new_matrix(org_matrix_by_cell, gene_list_file):
split_on='_'
gene_df = pd.read_csv(os.path.join(path_to_file, gene_list_file), delimiter= '\t')
gene_list = gene_df['GeneID'].tolist()
group_list = gene_df['GroupID'].tolist()
gmatrix_df = org_matrix_by_cell[gene_list]
cmatrix_df = gmatrix_df.transpose()
score_df = pd.DataFrame(zip(gene_list, group_list), columns=['GeneID', 'GroupID'])
sample_data = pd.read_csv(os.path.join(path_to_file, 'samples.table'), delimiter= '\t', index_col=0)
by_sample = sample_data.transpose()
map_data = pd.read_csv(os.path.join(path_to_file, 'results_'+base_name+'_align.txt'), delimiter= '\t', index_col=0)
by_cell_map = map_data.transpose()
loading_data = pd.read_csv(os.path.join(path_to_file, 'Pdgfra_cell_loading_all.txt'), delimiter= '\t', index_col=0)
l_data = loading_data.transpose()
cell_list = gmatrix_df.index.tolist()
cell_data = []
cell_label_dict ={'pnx1':('pnx', 4, 'high', 'd4_Pdgfra_ctrl'), 'ctrl1':('ctrl',0, 'high', 'd0_Pdgfra_ctrl'), 'Low_pnx':('pnx',4, 'low', 'd4_PDGFRalpha_low'), 'Low_ctrl':('ctrl', 0, 'ctrl', 'd0_PDGFRalpha_low')}
new_cell_list = []
old_cell_list = []
single_cell_list = []
single_cell_rename = []
for cell in cell_list:
match = False
if cell[0:4] == 'pnx1':
k = 'pnx1'
tracking_id = 'd4_'+cell.split('_')[1]
match = True
num = int(cell.split('_')[1].strip('C'))
if cell.split('_')[1][1]== '0':
no_z = cell.split('_')[1][0]+cell.split('_')[1][2]
align_k = no_z+'_'+'Pdgfra-pnxd4'
else:
align_k = cell.split('_')[1]+'_'+'Pdgfra-pnxd4'
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:5] == 'ctrl1':
k = 'ctrl1'
tracking_id = 'd0_'+cell.split('_')[1]
num = int(cell.split('_')[1].strip('C'))
if cell.split('_')[1][1]== '0':
no_z = cell.split('_')[1][0]+cell.split('_')[1][2]
align_k = no_z+'_'+'Pdgfra-ctrl1'
else:
align_k = cell.split('_')[1]+'_'+'Pdgfra-ctrl1'
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:7] == 'Low_pnx':
k = 'Low_pnx'
tracking_id = 'd4_low_'+cell.split('_')[2]
num = int(cell.split('_')[2].strip('C'))
if cell.split('_')[2][1]== '0':
no_z = cell.split('_')[2][0]+cell.split('_')[2][2]
align_k = 'pdgfra_low_d4pnx_'+no_z
else:
align_k = 'pdgfra_low_d4pnx_'+cell.split('_')[2]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:8] == 'Low_ctrl':
k = 'Low_ctrl'
tracking_id = 'd0_low_'+cell.split('_')[2]
num = int(cell.split('_')[2].strip('C'))
if cell.split('_')[2][1]== '0':
no_z = cell.split('_')[2][0]+cell.split('_')[2][2]
align_k = 'pdgfra_low_ctrl_'+no_z
else:
align_k = 'pdgfra_low_ctrl_'+cell.split('_')[2]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
if match:
tcf21_level = cmatrix_df[cell]['Tcf21']
if int(tcf21_level) >= 50:
Tcf21='high'
elif int(tcf21_level) < 50 and int(tcf21_level) >= 5 :
Tcf21='med'
elif int(tcf21_level) < 5:
Tcf21='low'
day = cell_label_dict[k][1]
condition = cell_label_dict[k][0]
hi_low = cell_label_dict[k][2]
loading_df = loading_data[cell_label_dict[k][3]]
loading = loading_df.iloc[num-1]
print num, tracking_id
if loading == '1':
single_cell = 'yes'
single_cell_list.append(cell)
single_cell_rename.append(tracking_id)
else:
single_cell = 'no'
total_mass = by_sample[cell+'_0'][1]
input_mass = by_cell_map[align_k][0]
per_mapped = by_cell_map[align_k][4]
c_data_tup = (tracking_id,total_mass,input_mass,per_mapped,condition,day,hi_low, Tcf21, single_cell)
print c_data_tup
cell_data.append(c_data_tup)
singlecell_cmatrix_df = cmatrix_df[single_cell_list]
singlecell_cmatrix_df.columns = single_cell_rename
    singlecell_cmatrix_df.to_csv(os.path.join(path_to_file, 'single_cell_matrix.txt'), sep = '\t')
score_df.to_csv(os.path.join(path_to_file, 'gene_feature_data.txt'), sep = '\t', index=False)
new_cmatrix_df = cmatrix_df[old_cell_list]
new_cmatrix_df.columns = new_cell_list
    new_cmatrix_df.to_csv(os.path.join(path_to_file, 'goterms_monocle_count_matrix.txt'), sep = '\t')
cell_data_df = pd.DataFrame(cell_data, columns=['tracking_id','total_mass','input_mass','per_mapped','condition','day', 'hi_low', 'Tcf21','single_cell'])
cell_data_df.to_csv(os.path.join(path_to_file, 'cell_feature_data.txt'), sep = '\t', index=False)
make_new_matrix(df_by_gene1, gene_file_source)
| mit |
HKUST-SING/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
# with a on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
mehdidc/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
sbg2133/miscellaneous_projects | carina/velocityMaps/cft3.py | 1 | 4053 | import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import aplpy
from astropy.wcs import WCS
import sys, os
from getIQU import IQU
from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
plt.ion()
rootdir = '/home/wizwit/miscellaneous_projects/carina/carinaData'
#cube_filename = os.path.join(rootdir, 'mopraData/G287_288.-2.0_0.5.12CO.fits')
sigma_file = os.path.join(rootdir,\
'mopraData/G287_288.-2.0_0.5.12CO_sigma.fits')
blast250_file = os.path.join(rootdir,\
'smooth/3.0_arcmin/carinaneb_250_smoothed_3.0_rl.fits')
#hdulist = fits.open(cube_filename)
#data_cube = hdulist[0].data
#wcs = WCS(hdulist[0].header)
#disp = np.nanstd(data_cube, axis = 0)
hdulist_co12 = fits.open(sigma_file)
sigma = hdulist_co12[0].data
wcs_co12 = WCS(hdulist_co12[0].header)
#hdulist_250 = fits.open(blast250_filename)
#blast250 = hdulist_250[0].data
#wcs_250 = WCS(hdulist_250[0].header)
def getPsi(path_to_file):
I, Q, U, __, wcs = IQU(path_to_file)
Pvals = np.sqrt(Q**2 + U**2)
pvals = Pvals/I
# pvals /= pol_eff[band_idx]
psi = 0.5*np.arctan2(U,Q)
return I, Q, U, wcs, psi
I, __, __, wcs_250, psi = getPsi(blast250_file)
cdelta = 0.00277777777778 * 60 # deg * 60 arcmin/deg
nskip = np.int(np.round(1.0/cdelta)) # points per 3 arcmin
r = nskip*3
# points to use for S calculation
x = np.arange(psi.shape[1])[::nskip]
x = x[30:-30] # don't use edges
y = np.arange(psi.shape[0])[::nskip]
y = y[5:-5] # don't use edges
X,Y = np.meshgrid(x,y)
mask_ra, mask_dec = wcs_250.all_pix2world(X, Y, 0)
mask_gal_coords = SkyCoord(mask_ra*u.degree, mask_dec*u.degree, frame='fk5').galactic
Ix = np.arange(I.shape[1])
Iy = np.arange(I.shape[0])
IX, IY = np.meshgrid(Ix, Iy)
ra, dec = wcs_250.all_pix2world(IX, IY, 0)
radec = SkyCoord(ra*u.degree, dec*u.degree, frame='fk5')
x_sig = np.arange(sigma.shape[1])
y_sig = np.arange(sigma.shape[0])
XSIG, YSIG = np.meshgrid(x_sig, y_sig)
sig_ra, sig_dec = wcs_co12.all_pix2world(XSIG, YSIG, 0)
radec_sig = SkyCoord(sig_ra*u.degree, sig_dec*u.degree, frame='galactic').fk5
sig_ra = np.asarray(radec_sig.ra)
sig_dec = np.asarray(radec_sig.dec)
#plt.imshow(I, origin = "lower", cmap = "viridis")
#plt.pcolormesh(Iy,Ix,I)
#plt.scatter(Y, X, c = 'r')
pol_disp = np.load('pol_disp.npy')
"""
pol_disp = np.empty((len(Ix), len(Iy)))
pol_disp[:] = np.nan
for i in range(len(x)):
for j in range(len(y)):
mask_inner = (Ix[np.newaxis,:] - x[i])**2 + (Iy[:,np.newaxis] - y[j])**2 < (r)**2
mask_outer = (Ix[np.newaxis,:] - x[i])**2 + (Iy[:,np.newaxis] - y[j])**2 <= (r)**2
mask = mask_inner ^ mask_outer
comp_points = psi[mask]
center_point = psi[y[j]][x[i]]
S2 = (center_point - comp_points)**2
S2_debias = S2 - np.var(S2)
#S = np.sqrt((1.0/len(S2))*np.sum(S2))
S = np.sqrt((1.0/len(S2_debias))*np.sum(S2_debias))
pol_disp[x[i]][y[j]] = S
"""
#hdu_250 = fits.PrimaryHDU(I, header=wcs_250.to_header())
#hdu_sigma = fits.PrimaryHDU(sigma, header=wcs_co12.to_header())
#fig = plt.figure()
#f_sigma = aplpy.FITSFigure(hdu_sigma, figure = fig)
#f_sigma.show_colorscale(cmap = 'inferno')
#f_250 = aplpy.FITSFigure(hdu_250, figure = fig)
#f_250.show_colorscale(cmap = 'inferno')
sig_x, sig_y = wcs_co12.all_world2pix(mask_gal_coords.l, mask_gal_coords.b, 0)
# integer pixel values
sig_x = np.round(sig_x).astype('int')
sig_y = np.round(sig_y).astype('int')
sig_mask_ra, sig_mask_dec = wcs_co12.all_pix2world(sig_x, sig_y, 0)
sig_mask_fk5 = SkyCoord(sig_mask_ra*u.degree, sig_mask_dec*u.degree, frame='galactic').fk5
plt.figure()
plt.xlim(np.max(np.asarray(radec.ra)), np.min(np.asarray(radec.ra)))
plt.pcolormesh(radec.ra, radec.dec, I, cmap = "inferno")
plt.pcolormesh(radec.ra, radec.dec, np.transpose(pol_disp), alpha = 0.5)
plt.pcolormesh(radec_sig.ra, radec_sig.dec, np.fliplr(sigma), cmap = "viridis", alpha = 0.5)
plt.scatter(sig_mask_fk5.ra, sig_mask_fk5.dec, c = 'r')
plt.scatter(mask_ra, mask_dec, c = 'k', alpha = 0.5)
| gpl-3.0 |
adamgreenhall/scikit-learn | sklearn/linear_model/tests/test_sag.py | 93 | 25649 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.datasets import make_blobs
from sklearn.base import clone
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
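# Added illustration (not part of the original test file): a minimal sketch
# showing how the reference helpers above -- get_step_size, sag and
# sag_sparse -- fit together on a tiny synthetic regression problem.  The
# helper name and the data sizes are arbitrary choices for this sketch.
def _demo_reference_sag(n_samples=30, n_features=4, alpha=0.5, n_iter=50):
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    y = X.dot(rng.normal(size=n_features))
    step_size = get_step_size(X, alpha, fit_intercept=True,
                              classification=False)
    # dense variant: applies every update eagerly
    w_dense, b_dense = sag(X, y, step_size, alpha, n_iter=n_iter,
                           dloss=squared_dloss, fit_intercept=True)
    # lazy variant: defers updates via cumulative sums, as used for sparse data
    w_lazy, b_lazy = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                dloss=squared_dloss, fit_intercept=True)
    # the two variants are expected to agree closely (cf. the matching tests)
    return np.max(np.abs(w_dense - w_lazy)), abs(b_dense - b_lazy)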
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = get_max_squared_sum(X)
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
def test_get_max_squared_sum():
n_samples = 100
n_features = 10
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features).astype(np.float64)
mask = rng.randn(n_samples, n_features)
X[mask > 0] = 0.
X_csr = sp.csr_matrix(X)
X[0, 3] = 0.
X_csr[0, 3] = 0.
sum_X = get_max_squared_sum(X)
sum_X_csr = get_max_squared_sum(X_csr)
assert_almost_equal(sum_X, sum_X_csr)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
| bsd-3-clause |
Aryan-Barbarian/bigbang | bigbang/mailman.py | 2 | 9498 | from bigbang.parse import get_date
import urllib2
import urllib
import gzip
import re
import os
import mailbox
import email.message  # needed for the isinstance check in recursive_get_payload
import parse
import pandas as pd
from pprint import pprint as pp
import w3crawl
import warnings
ml_exp = re.compile('/([\w-]*)/$')
gz_exp = re.compile('href="(\d\d\d\d-\w*\.txt\.gz)"')
ietf_ml_exp = re.compile('href="([\d-]+.mail)"')
w3c_archives_exp = re.compile('lists\.w3\.org')
mailing_list_path_expressions = [gz_exp, ietf_ml_exp]
class InvalidURLException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MissingDataException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def load_data(name,archive_dir="archives",mbox=False):
"""
Loads the data associated with an archive name, given
as a string.
Attempts to open {archives-directory}/NAME.csv as data.
    Failing that, if the name is a URL, it will try to derive
the list name from that URL and load the .csv again.
Failing that, it will collect the data from the web and create the CSV archive.
"""
if mbox:
return open_list_archives(name, archive_dir=archive_dir, mbox=True)
# a first pass at detecting if the string is a URL...
if not (name.startswith("http://") or name.startswith("https://")):
path = os.path.join(archive_dir,name + ".csv")
if os.path.exists(path):
data = pd.read_csv(path)
return data
else:
print "No data available at %s" % (path)
else:
path = os.path.join(archive_dir,get_list_name(name) + ".csv")
if os.path.exists(path):
data = pd.read_csv(path)
return data
else:
print "No data found at %s. Attempting to collect data from URL." % (name)
print "This could take a while."
return collect_from_url(name,archive_dir=archive_dir)
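# Hypothetical usage sketch for load_data (the list URL and name below are made
# up and only illustrate the two call styles described in the docstring):
#   df = load_data("http://mail.example.org/pipermail/somelist/")  # derive name, load or collect
#   df = load_data("somelist")                                     # read archives/somelist.csv directly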
def collect_from_url(url,archive_dir="archives"):
url = url.rstrip()
collect_archive_from_url(url)
unzip_archive(url)
data = open_list_archives(url)
# hard coding the archives directory in too many places
# need to push this default to a configuration file
path = os.path.join(archive_dir, get_list_name(url) + ".csv")
data.to_csv(path, ",", encoding="utf-8")
return data
def collect_from_file(urls_file):
for url in open(urls_file):
collect_from_url(url)
# gets the 'list name' from a canonical mailman archive url
# does nothing if it's not this kind of url
# it would be better to catch these non-url cases earlier
def get_list_name(url):
url = url.rstrip()
if ml_exp.search(url) is not None:
return ml_exp.search(url).groups()[0]
else:
warnings.warn("No mailing list name found at %s" % url)
return url
def archive_directory(base_dir, list_name):
arc_dir = os.path.join(base_dir, list_name)
if not os.path.exists(arc_dir):
os.makedirs(arc_dir)
return arc_dir
def collect_archive_from_url(url, archive_dir="archives"):
list_name = get_list_name(url)
pp("Getting archive page for %s" % list_name)
if w3c_archives_exp.search(url):
return w3crawl.collect_from_url(url, archive_dir)
response = urllib2.urlopen(url)
html = response.read()
results = []
for exp in mailing_list_path_expressions:
results.extend(exp.findall(html))
pp(results)
# directory for downloaded files
arc_dir = archive_directory(archive_dir, list_name)
# download monthly archives
for res in results:
result_path = os.path.join(arc_dir, res)
# this check is redundant with urlretrieve
if not os.path.isfile(result_path):
gz_url = url + res
pp('retrieving %s' % gz_url)
resp = urllib2.urlopen(gz_url)
if resp.getcode() == 200:
print("200 - writing file to %s" % (result_path))
output = open(result_path, 'wb')
output.write(resp.read())
output.close()
else:
                print("%s error code trying to retrieve %s" %
                      (str(resp.getcode()), gz_url))
def unzip_archive(url, archive_dir="archives"):
arc_dir = archive_directory(archive_dir, get_list_name(url))
gzs = [os.path.join(arc_dir, fn) for fn
in os.listdir(arc_dir)
if fn.endswith('.txt.gz')]
print 'unzipping %d archive files' % (len(gzs))
for gz in gzs:
try:
f = gzip.open(gz, 'rb')
content = f.read()
f.close()
txt_fn = str(gz[:-3])
f2 = open(txt_fn, 'w')
f2.write(content)
f2.close()
except Exception as e:
print e
# This works for the names of the files. Order them.
# datetime.datetime.strptime('2000-November',"%Y-%B")
# This doesn't yet work for parsing the dates. Because of %z Bullshit
# datetime.datetime.strptime(arch[0][0].get('Date'),"%a, %d %b %Y %H:%M:%S %z")
# x is a String, a Message, or a list of Messages
# The payload of a Message may be a String, a Message, or a list of Messages.
# OR maybe it's never just a Message, but always a list of them.
def recursive_get_payload(x):
if isinstance(x,str):
return x
elif isinstance(x,list):
#return [recursive_get_payload(y) for y in x]
return recursive_get_payload(x[0])
elif isinstance(x,email.message.Message):
return recursive_get_payload(x.get_payload())
else:
print x
return None
def open_list_archives(url, archive_dir="archives", mbox=False):
"""
Returns a list of all email messages contained in the specified directory.
The argument *url* here is taken to be the name of a subdirectory
of the directory specified in argument *archive_dir*.
This directory is expected to contain files with extensions .txt,
.mail, or .mbox. These files are all expected to be in mbox format--
i.e. a series of blocks of text starting with headers (colon-separated
key-value pairs) followed by an email body.
"""
messages = None
if mbox and (os.path.isfile(os.path.join(archive_dir, url))):
# treat string as the path to a file that is an mbox
box = mailbox.mbox(os.path.join(archive_dir, url), create=False)
messages = box.values()
else:
        # assume string is the path to a directory with many mbox-format files
list_name = get_list_name(url)
arc_dir = archive_directory(archive_dir, list_name)
file_extensions = [".txt", ".mail", ".mbox"]
txts = [os.path.join(arc_dir, fn) for fn
in os.listdir(arc_dir)
if any([fn.endswith(extension) for extension in file_extensions])]
print 'Opening %d archive files' % (len(txts))
arch = [mailbox.mbox(txt, create=False).values() for txt in txts]
messages = [item for sublist in arch for item in sublist]
if len(messages) == 0:
raise MissingDataException(
("No messages in %s under %s. Did you run the "
"collect_mail.py script?") %
(archive_dir, list_name))
return messages_to_dataframe(messages)
def get_text(msg):
import chardet
text = u""
if msg.is_multipart():
html = None
for part in msg.walk():
if part.get_content_charset() is None:
charset = chardet.detect(str(part))['encoding']
else:
charset = part.get_content_charset()
if part.get_content_type() == 'text/plain':
text = unicode(part.get_payload(decode=True), str(charset), "ignore")
if part.get_content_type() == 'text/html':
html = unicode(part.get_payload(decode=True), str(charset), "ignore")
        # prefer the plain-text part; fall back to converted HTML, else return empty text
        if text:
            return text.strip()
        elif html is not None:
            import html2text
            h = html2text.HTML2Text()
            h.encoding = 'utf-8'
            return unicode(h.handle(html))
        return text
else:
charset = msg.get_content_charset() or 'utf-8'
text = unicode(msg.get_payload(), encoding=charset, errors='ignore')
return text.strip()
def messages_to_dataframe(messages):
"""
Turn a list of parsed messages into a dataframe of message data,
indexed by message-id, with column-names from headers.
"""
def safe_unicode(t):
return t and unicode(t, 'utf-8', 'ignore')
# extract data into a list of tuples -- records -- with
# the Message-ID separated out as an index
pm = [(m.get('Message-ID'),
safe_unicode(m.get('From')),
safe_unicode(m.get('Subject')),
get_date(m),
safe_unicode(m.get('In-Reply-To')),
safe_unicode(m.get('References')),
get_text(m))
for m in messages if m.get('Message-ID')]
mdf = pd.DataFrame.from_records(list(pm),
index='Message-ID',
columns=['Message-ID', 'From',
'Subject',
'Date',
'In-Reply-To',
'References',
'Body'])
mdf.index.name = 'Message-ID'
return mdf
| gpl-2.0 |
toobaz/pandas | pandas/tests/io/pytables/test_compat.py | 2 | 2635 | import pytest
import pandas as pd
from pandas.tests.io.pytables.test_pytables import ensure_clean_path
from pandas.util.testing import assert_frame_equal
tables = pytest.importorskip("tables")
@pytest.fixture
def pytables_hdf5_file():
"""Use PyTables to create a simple HDF5 file."""
table_schema = {
"c0": tables.Time64Col(pos=0),
"c1": tables.StringCol(5, pos=1),
"c2": tables.Int64Col(pos=2),
}
t0 = 1561105000.0
testsamples = [
{"c0": t0, "c1": "aaaaa", "c2": 1},
{"c0": t0 + 1, "c1": "bbbbb", "c2": 2},
{"c0": t0 + 2, "c1": "ccccc", "c2": 10 ** 5},
{"c0": t0 + 3, "c1": "ddddd", "c2": 4294967295},
]
objname = "pandas_test_timeseries"
with ensure_clean_path("written_with_pytables.h5") as path:
# The `ensure_clean_path` context mgr removes the temp file upon exit.
with tables.open_file(path, mode="w") as f:
t = f.create_table("/", name=objname, description=table_schema)
for sample in testsamples:
for key, value in sample.items():
t.row[key] = value
t.row.append()
yield path, objname, pd.DataFrame(testsamples)
class TestReadPyTablesHDF5:
"""
A group of tests which covers reading HDF5 files written by plain PyTables
(not written by pandas).
Was introduced for regression-testing issue 11188.
"""
def test_read_complete(self, pytables_hdf5_file):
path, objname, df = pytables_hdf5_file
result = pd.read_hdf(path, key=objname)
expected = df
assert_frame_equal(result, expected)
def test_read_with_start(self, pytables_hdf5_file):
path, objname, df = pytables_hdf5_file
# This is a regression test for pandas-dev/pandas/issues/11188
result = pd.read_hdf(path, key=objname, start=1)
expected = df[1:].reset_index(drop=True)
assert_frame_equal(result, expected)
def test_read_with_stop(self, pytables_hdf5_file):
path, objname, df = pytables_hdf5_file
# This is a regression test for pandas-dev/pandas/issues/11188
result = pd.read_hdf(path, key=objname, stop=1)
expected = df[:1].reset_index(drop=True)
assert_frame_equal(result, expected)
def test_read_with_startstop(self, pytables_hdf5_file):
path, objname, df = pytables_hdf5_file
# This is a regression test for pandas-dev/pandas/issues/11188
result = pd.read_hdf(path, key=objname, start=1, stop=2)
expected = df[1:2].reset_index(drop=True)
assert_frame_equal(result, expected)
| bsd-3-clause |
maxplanck-ie/HiCExplorer | hicexplorer/hicPlotDistVsCounts.py | 1 | 17606 | import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import os.path
import numpy as np
import pandas as pd
import argparse
from hicmatrix import HiCMatrix
from hicexplorer._version import __version__
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import OrderedDict
from past.builtins import zip
from scipy.sparse import triu
import logging
log = logging.getLogger(__name__)
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
add_help=False,
description='This program creates distance vs. Hi-C counts plots. It can use several matrix files to compare '
'them at once. If the `--perchr` option is given, each chromosome is plotted independently. '
'When plotting multiple matrices, denser matrices are scaled down to match the sum of the smallest matrix.')
parserRequired = parser.add_argument_group('Required arguments')
# define the arguments
parserRequired.add_argument('--matrices', '-m',
help='Hi-C normalized (corrected) matrices. Each path should be separated by a space.',
nargs="+",
required=True)
parserRequired.add_argument('--plotFile', '-o',
help='File name to save the file. The given file '
'ending will be used '
'to determine the image format. '
'The available options are: .png, .emf, '
'.eps, .pdf and .svg.',
type=argparse.FileType('w'),
metavar='file name',
required=True)
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--labels',
help='Label to assign to each matrix file. Each label should be separated by a space. Quote '
'labels that contain spaces: E.g. --labels label1 "labels 2". If no labels are given '
'then the file name is used.',
nargs="+")
parserOpt.add_argument('--skipDiagonal', '-s',
help='If set, diagonal counts are not included.',
action='store_true')
parserOpt.add_argument('--maxdepth',
help='Maximum distance from diagonal to use. In other words, distances up to maxDepth are '
'computed. Default is 3 million bp.',
metavar='INT bp',
type=int,
default=int(3e6))
parserOpt.add_argument('--perchr',
help='If given, computes and display distance versus Hi-C counts plots for each chromosome stored '
'in the matrices passed to --matrices.',
action='store_true')
parserOpt.add_argument('--chromosomeExclude',
help='Exclude the given list of chromosomes. This is useful for example to exclude '
'the Y chromosome. The names of the chromosomes should be separated by space.',
nargs='+')
parserOpt.add_argument('--outFileData',
help='If given, the data underlying the plots is saved on this file.',
type=argparse.FileType('w'),
)
parserOpt.add_argument('--plotsize',
help='Width and height of the plot (in inches). Default is 6*number of cols, 4 * number of '
'rows. The maximum number of rows is 4. Example: --plotsize 6 5',
nargs=2,
type=float
)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
def compute_distance_mean(hicmat, maxdepth=None, perchr=False):
"""
    Computes the mean corrected Hi-C contact count as a function of genomic
    distance, either genome-wide or per chromosome.
    For every distance (in multiples of the bin size, up to maxdepth) the
    counts on the corresponding diagonal of the upper-triangular contact
    matrix are summed and divided by the number of bin pairs at that
    distance, which yields the mean count per distance.
    Parameters
    ----------
    hicmat: HiCMatrix object
    maxdepth: maximum distance from the diagonal to consider. All contacts beyond this distance will not
    be considered.
    perchr: bool to indicate if computations should be performed per chromosome
    Returns
    -------
    dict mapping each chromosome name (or 'all' when perchr is False) to an
    OrderedDict of genomic distance (in bp) -> mean corrected counts
    The doctest below exercises the related HiCMatrix.convert_to_obs_exp_matrix
    method rather than this function:
>>> from scipy.sparse import csr_matrix, dia_matrix
>>> row, col = np.triu_indices(5)
>>> cut_intervals = [('a', 0, 10, 1), ('a', 10, 20, 1),
... ('a', 20, 30, 1), ('a', 30, 40, 1), ('b', 40, 50, 1)]
>>> hic = HiCMatrix.hiCMatrix()
>>> hic.nan_bins = []
>>> matrix = np.array([
... [ 1, 8, 5, 3, 0],
... [ 0, 4, 15, 5, 1],
... [ 0, 0, 0, 7, 2],
... [ 0, 0, 0, 0, 1],
... [ 0, 0, 0, 0, 0]])
>>> hic.matrix = csr_matrix(matrix)
>>> hic.setMatrix(hic.matrix, cut_intervals)
>>> hic.convert_to_obs_exp_matrix().todense()
matrix([[1. , 0.8, 1. , 1. , 0. ],
[0. , 4. , 1.5, 1. , 1. ],
[0. , 0. , 0. , 0.7, 2. ],
[0. , 0. , 0. , 0. , 1. ],
[0. , 0. , 0. , 0. , 0. ]])
>>> hic.matrix = csr_matrix(matrix)
>>> hic.convert_to_obs_exp_matrix(maxdepth=20).todense()
matrix([[1. , 0.8, 1. , 0. , 0. ],
[0. , 4. , 1.5, 1. , 0. ],
[0. , 0. , 0. , 0.7, nan],
[0. , 0. , 0. , 0. , nan],
[0. , 0. , 0. , 0. , 0. ]])
>>> hic.matrix = csr_matrix(matrix)
>>> hic.convert_to_obs_exp_matrix(zscore=True).todense()
matrix([[ 0. , -0.56195149, nan, nan, -1.41421356],
[ 0. , 1.93649167, 1.40487872, nan, 0. ],
[ 0. , 0. , -0.64549722, -0.84292723, 1.41421356],
[ 0. , 0. , 0. , -0.64549722, 0. ],
[ 0. , 0. , 0. , 0. , -0.64549722]])
nans occur where the standard deviation is zero
"""
binsize = hicmat.getBinSize()
if maxdepth:
if maxdepth < binsize:
exit("Please specify a maxDepth larger than bin size ({})".format(binsize))
max_depth_in_bins = int(float(maxdepth * 1.5) / binsize)
# work only with the upper matrix
# and remove all pixels that are beyond
        # max_depth_in_bins
        # (this is done by subtracting a second sparse matrix
        # that contains only the part of the upper triangle to be removed)
hicmat.matrix = triu(hicmat.matrix, k=0, format='csr') - \
triu(hicmat.matrix, k=max_depth_in_bins, format='csr')
else:
hicmat.matrix = triu(hicmat.matrix, k=0, format='csr')
hicmat.matrix.eliminate_zeros()
chr_submatrix = OrderedDict()
cut_intervals = OrderedDict()
chrom_sizes = OrderedDict()
chrom_range = OrderedDict()
if perchr:
for chrname in hicmat.getChrNames():
chr_range = hicmat.getChrBinRange(chrname)
chr_submatrix[chrname] = hicmat.matrix[chr_range[0]:chr_range[1], chr_range[0]:chr_range[1]].tocoo()
cut_intervals[chrname] = [hicmat.cut_intervals[x] for x in range(chr_range[0], chr_range[1])]
chrom_sizes[chrname] = [chr_submatrix[chrname].shape[0]]
chrom_range[chrname] = (chr_range[0], chr_range[1])
else:
chr_submatrix['all'] = hicmat.matrix.tocoo()
cut_intervals['all'] = hicmat.cut_intervals
chrom_sizes['all'] = np.array([v[1] - v[0] for k, v in hicmat.chrBinBoundaries.items()])
chrom_range['all'] = (0, hicmat.matrix.shape[0])
mean_dict = {}
for chrname, submatrix in chr_submatrix.items():
log.info("processing chromosome {}\n".format(chrname))
dist_list, chrom_list = hicmat.getDistList(submatrix.row, submatrix.col, HiCMatrix.hiCMatrix.fit_cut_intervals(cut_intervals[chrname]))
# to get the sum of all values at a given distance I use np.bincount which
# is quite fast. However, the input of bincount is positive integers. Moreover
# it returns the sum for every consecutive integer, even if this is not on the list.
# Thus, dist_list, which contains the distance in bp between any two bins is
# converted to bin distance.
# Because positive integers are needed we add +1 to all bin distances
# such that the value of -1 (which means different chromosomes) can now be used
dist_list[dist_list == -1] = -binsize
# divide by binsize to get a list of bin distances and add +1 to remove negative values
dist_list = (np.array(dist_list).astype(float) / binsize).astype(int) + 1
# for each distance, return the sum of all values
sum_counts = np.bincount(dist_list, weights=submatrix.data)
distance_len = np.bincount(dist_list)
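        # Illustration of the bincount bookkeeping above (made-up numbers, not
        # part of the computation): np.bincount([1, 1, 3], weights=[2., 5., 4.])
        # returns array([0., 7., 0., 4.]), i.e. entry k holds the summed counts
        # of all contacts whose shifted bin distance equals k.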
# compute the average for each distance
mat_size = submatrix.shape[0]
# compute mean value for each distance
mu = {}
zero_value_bins = []
for bin_dist_plus_one, sum_value in enumerate(sum_counts):
if maxdepth and bin_dist_plus_one == 0: # this is for intra chromosomal counts
# when max depth is set, the computation
# of the total_intra is not accurate and is safer to
# output np.nan
mu[bin_dist_plus_one] = np.nan
continue
if bin_dist_plus_one == 0:
total_intra = mat_size ** 2 - sum([size ** 2 for size in chrom_sizes[chrname]])
diagonal_length = total_intra / 2
else:
# to compute the average counts per distance we take the sum_counts and divide
# by the number of values on the respective diagonal
# which is equal to the size of each chromosome - the diagonal offset (for those
# chromosome larger than the offset)
# In the following example with two chromosomes
# the first (main) diagonal has a size equal to the matrix (6),
# while the next has 1 value less for each chromosome (4) and the last one has only 2 values
# 0 1 2 . . .
# - 0 1 . . .
# - - 0 . . .
# . . . 0 1 2
# . . . - 0 1
# . . . - - 0
                # idx - 1 because earlier the values were
                # shifted.
diagonal_length = sum([size - (bin_dist_plus_one - 1) for size in chrom_sizes[chrname]
if size > (bin_dist_plus_one - 1)])
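                # worked example (illustration only): for the two chromosomes
                # of 3 bins sketched above and bin_dist_plus_one = 2 this gives
                # diagonal_length = (3 - 1) + (3 - 1) = 4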
# the diagonal length should contain the number of values at a certain distance.
# If the matrix is dense, the distance_len[bin_dist_plus_one] correctly contains the number of values
# If the matrix is equally spaced, then, the diagonal_length as computed before is accurate.
# But, if the matrix is both sparse and with unequal bins, then none of the above methods is
                # accurate but the diagonal_length as computed before will be closer.
diagonal_length = max(diagonal_length, distance_len[bin_dist_plus_one])
if diagonal_length == 0:
mu[bin_dist_plus_one] = np.nan
else:
mu[bin_dist_plus_one] = np.float64(sum_value) / diagonal_length
if sum_value == 0:
zero_value_bins.append(bin_dist_plus_one)
log.info("zero value for {}, diagonal len: {}\n".format(bin_dist_plus_one, diagonal_length))
if len(zero_value_bins) > 10:
diff = np.diff(zero_value_bins)
if len(diff[diff == 1]) > 10:
# if too many consecutive bins with zero are found that means that probably no
# further counts will be found
                    log.info("skipping rest of chromosome {}. Too many empty diagonals\n".format(chrname))
break
if np.isnan(sum_value):
log.info("nan value found for distance {}\n".format((bin_dist_plus_one - 1) * binsize))
if maxdepth is None:
maxdepth = np.inf
mean_dict[chrname] = OrderedDict([((k - 1) * binsize, v) for k, v in mu.items() if k > 0 and
(k - 1) * binsize <= maxdepth])
# mean_dict[chrname]['intra_chr'] = mu[0]
return mean_dict
def main(args=None):
"""
    Plots the mean corrected Hi-C counts as a function of genomic distance
    for one or more matrices, optionally per chromosome, and optionally
    writes the underlying values to a file.
"""
args = parse_arguments().parse_args(args)
mean_dict = OrderedDict()
matrix_sum = {}
if args.labels is None:
labels = OrderedDict([(x, os.path.basename(x)) for x in args.matrices])
else:
labels = OrderedDict(zip(args.matrices, args.labels))
chroms = set()
for matrix_file in args.matrices:
hic_ma = HiCMatrix.hiCMatrix(matrix_file)
matrix_sum[matrix_file] = hic_ma.matrix.sum()
if args.chromosomeExclude is None:
args.chromosomeExclude = []
chrtokeep = [x for x in list(hic_ma.interval_trees) if x not in args.chromosomeExclude]
hic_ma.keepOnlyTheseChr(chrtokeep)
mean_dict[matrix_file] = compute_distance_mean(hic_ma, maxdepth=args.maxdepth, perchr=args.perchr)
chroms = chroms.union([k for k in list(mean_dict[matrix_file]) if len(mean_dict[matrix_file][k]) > 1])
# compute scale factors such that values are comparable
min_sum = min(matrix_sum.values())
scale_factor = dict([(matrix_file, float(min_sum) / mat_sum) for matrix_file, mat_sum in matrix_sum.items()])
log.info("The scale factors used are: {}".format(scale_factor))
if len(args.matrices) > 1 and args.perchr:
# in this case, for each chromosome a plot is made that combines the data from the
# hic matrices
max_cols = 4
num_rows = int(np.ceil(float(len(chroms)) / max_cols))
num_cols = min(len(chroms), max_cols)
else:
num_cols = num_rows = 1
if args.plotsize is None:
width = 6
height = 4
else:
width, height = args.plotsize
fig = plt.figure(figsize=(width * num_cols, height * num_rows))
axs = np.empty((num_rows, num_cols), dtype='object')
for matrix_file in args.matrices:
idx = 0
for chrom, mean_values in mean_dict[matrix_file].items():
if len(mean_values) <= 1:
log.debug("No values found for: {}, chromosome: {}\n".format(matrix_file, chrom))
continue
x, y = zip(*[(k, v) for k, v in mean_values.items() if v > 0])
if len(x) <= 1:
log.debug("No values found for: {}, chromosome: {}\n".format(matrix_file, chrom))
continue
if args.perchr and len(args.matrices) == 1:
col = 0
row = 0
else:
col = idx % num_cols
row = idx // num_cols
if axs[row, col] is None:
ax = plt.subplot2grid((num_rows, num_cols), (row, col))
ax.set_xlabel('genomic distance')
ax.set_ylabel('corrected Hi-C counts')
try:
ax.set_yscale('log')
ax.set_xscale('log')
except ValueError:
continue
else:
ax = axs[row, col]
y = np.array(y) * scale_factor[matrix_file]
if args.perchr and len(args.matrices) > 1:
label = labels[matrix_file]
ax.set_title(chrom)
elif args.perchr:
label = chrom
else:
label = labels[matrix_file]
ax.plot(x, y, label=label)
axs[row, col] = ax
idx += 1
if args.outFileData is not None:
x_vals = np.stack(x).T
y_vals = np.stack(y).T
table_to_export = pd.DataFrame({'Matrix': labels[matrix_file],
'Chromosome': chrom,
'Distance': x_vals,
'Contacts': y_vals})
table_to_export.to_csv(args.outFileData, sep='\t')
for ax in axs.reshape(-1):
if ax is None:
continue
ax.legend(prop={'size': 'small'})
ax.set_xlim(0, args.maxdepth)
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig(args.plotFile.name, bbox_inches='tight', bbox_extra_artists=(lgd,))
plt.close(fig)
| gpl-2.0 |
scholz/toyclassifiers | toykmeans.py | 1 | 6423 | #!/usr/bin/python2.7
from abstractclassifier import AbstractClassifier
import numpy as np
import random
class ToyKMeans(AbstractClassifier):
"""Toy KMeans unsupervised clustering
Algorithm
---------
    1. Random initialization of centroids (cluster centers), or use provided centers
2. Computation of squared euclidean distance between clusters and all instances in train data
3. Assign each instance to closest centroid
4. Handle centroids which have no assigned data instances
a. assign those data instances which have the largest distance to the other centroids to empty centroids
b. set centroids coordinates to these data instances
Used Documentation
---------------------------
- Book: Collective Intelligence, Segaran, O'Reilly
- Sklearn KMeans Source
    Attributes
    ----------
    n_clusters_ : int
        Number of centroids to fit.
    n_iterations_ : int
        Maximum number of fit iterations.
    centroids_ : array-like, shape (n_clusters, n_features)
        Centroid coordinates (empty until fit is run or centroids are provided).
    inertia_ : float
        Sum of squared distances of the instances to their closest centroid.
    """
def __init__(self, n_clusters, n_iterations=2, seed=None, centroids=[]):
self.n_clusters_=n_clusters
self.n_iterations_=n_iterations
self.centroids_=np.array(centroids)
self.inertia_=0
random.seed(seed)
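    # Hypothetical usage sketch (X stands for any (n_samples, n_features)
    # numpy array; the names are made up):
    #   km = ToyKMeans(n_clusters=3, n_iterations=10, seed=0)
    #   centroids = km.fit(X)
    #   labels = km.predict(X)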
def fit(self, train_data, metric=None):
"""Run the K-means clustering on train_data
Parameters
----------
train_data : array-like, shape (n_samples, n_features)
Returns
-------
        centroids : array-like, shape (n_clusters, n_features)
"""
        if metric is None:
            self.metric = self.euclidean_sqr
        else:
            self.metric = metric
if self.centroids_.shape[0]==0:
centroids=self.random_init(train_data)
else:
centroids=self.centroids_
# remove mean from data
#train_data_mean=np.mean(train_data,axis=0)
#train_data=train_data-train_data_mean
# row norms??
#train_data_sqr_norms = np.einsum('ij,ij->i', train_data, train_data)
old_centroids=np.zeros(centroids.shape)
# iterate until no change in cluster centers or defined number of iterations is reached
n_iterations=self.n_iterations_
while n_iterations>0 and np.array_equal(centroids,old_centroids)==False:
n_iterations-=1
old_centroids=centroids
centroids=self.fit_iteration(train_data, centroids)
self.centroids_=centroids
return centroids
def fit_iteration(self, train_data, centroids):
train_data_centroid_idx=np.zeros( (train_data.shape[0],2) )
dist=0.
for i in range(train_data.shape[0]):
dists=[]
for c in range(centroids.shape[0]):
# compute distance between current train_data instance and centroid
dists.append( self.metric( instance=train_data[i,:], centroid=centroids[c,:]) )
# assign instance to closest centroid
train_data_centroid_idx[i,:]=np.array([ dists.index(min(dists)), min(dists)])
# inertia i.e. total distance
dist+=min(dists)
self.inertia_=dist
# extract instances with largest distances
distances=train_data_centroid_idx[:,1]
distances_idx=distances.argsort()[::-1]
# new centroid positions
new_centroids=np.zeros(centroids.shape)
# centroids with no assigned points are assigned the farthest points from the other centroids
# note this allows that this point is attributed to two centroids
pc=0
for c in range(centroids.shape[0]):
            if c not in train_data_centroid_idx[:,0] and pc<train_data.shape[0]:
new_centroids[c,:]=train_data[distances_idx[pc],:]
pc+=1
# move clusters such that the distance to all assigned points is minimized
for c in range(centroids.shape[0]):
points_of_centroid=train_data[train_data_centroid_idx[:,0]==c,:]
if points_of_centroid.shape[0]>0:
new_centroids[c,:]=np.mean(points_of_centroid,axis=0)
return new_centroids
def predict(self, test_data):
""" Predict to which clusters the instances in test_data belong
"""
if self.centroids_.shape[0]==0:
raise ValueError("No centroids present. Run KMeans.fit first.")
print test_data.shape
part_of_cluster=np.zeros(test_data.shape[0])
for i in range(test_data.shape[0]):
dists=[]
for c in range(self.centroids_.shape[0]):
# compute distance between current train_data instance and each cluster
dists.append( self.metric( instance=test_data[i,:], centroid=self.centroids_[c,:]) )
# assign point to cluster with minimal distance
part_of_cluster[i]=dists.index(min(dists))
return part_of_cluster
def random_init(self, train_data):
"""Provides random initialization of clusters using dimension of train_data
        Initializes self.n_clusters_ centroids, drawing each feature value randomly
        from the range spanned by that feature in the data set
Parameters
----------
train_data : array-like, shape (n_samples, n_features)
Returns
-------
centroids: array-like, shape (n_clusters, n_features)
"""
centroids=np.zeros((self.n_clusters_, train_data.shape[1]))
for c in range(self.n_clusters_):
for f in range(train_data.shape[1]):
centroids[c,f]=random.uniform(min(train_data[:,f]), max(train_data[:,f]))
return centroids
def euclidean_sqr(self, instance, centroid):
""" calculate euclidean distance between instance and cluster
"""
return np.linalg.norm(instance-centroid)**2
if __name__=="__main__":
data=np.array([ [1,1], [1.5,1.5], [1,1.5],
[4,4], [4.5,4.5], [1,1.7]], dtype=np.float)
centroids=[[1,1], [2,2],[3,3]]
#data=np.array([ [1], [1.5], [1.7],
# [4], [4.5], [1.2]], dtype=np.float)
#centroids=[[1], [2],[200]]
km=ToyKMeans(n_clusters=3, centroids=centroids)
print "toykmeans"
print km.fit(data)
#print km.predict(np.array([data[3]]))
from sklearn.cluster import KMeans
skm=KMeans(n_clusters=3,init=np.array(centroids), max_iter=2, n_init=1, verbose=False)
skm.fit(data)
print "skm"
print skm.cluster_centers_
print skm.labels_
skm.predict(np.array([data[3]]))
| mit |
dimarkov/pyBefit | inference/hierarchical.py | 1 | 15503 | """This module contains the hierarchical implementations of the parametric model.
"""
import pandas as pd
import torch
from torch import zeros, ones
from torch.distributions import constraints, biject_to
import pyro.distributions as dist
from pyro import sample, param, plate, deterministic
from pyro.distributions.util import sum_rightmost
from pyro.ops.indexing import Vindex
from .infer import Inferrer
__all__ = [
'Horseshoe',
'NormalGamma',
'NormalGammaHierarch'
]
class Horseshoe(Inferrer):
def __init__(self, agent, stimulus, responses, mask=None, fixed_params=None):
super(Horseshoe, self).__init__(agent, stimulus, responses, mask=mask, fixed_params=fixed_params)
def model(self):
"""
Generative model of behavior with a hierarchical (horseshoe)
prior over free model parameters.
"""
runs = self.runs # number of independent runs of experiment
npar = self.npar # number of parameters
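        # Informal sketch of the hierarchy sampled below (notation only;
        # k indexes free parameters, i indexes runs/subjects):
        #   mu_k    ~ Normal(m_k, s_k)               group-level mean
        #   tau_k   ~ HalfCauchy(1)                  parameter-wise scale
        #   locs_ik ~ Normal(mu_k, lam * tau_k)      subject-level parameter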
# define hyper priors over model parameters.
        # each model parameter has a hyperprior defining the group-level mean
m = param('m', zeros(npar))
s = param('s', ones(npar), constraint=constraints.positive)
mu = sample('mu', dist.Normal(m, s).to_event(1))
        # define prior uncertainty over model parameters and subjects
lam = param('lam', ones(1), constraint=constraints.positive)
tau = sample('tau', dist.HalfCauchy(ones(npar)).to_event(1))
        # define prior mean over model parameters and subjects
with plate('runs', runs):
base_dist = dist.Normal(0., 1.).expand_by([npar])
transform = dist.transforms.AffineTransform(mu, lam * tau)
locs = sample('locs', dist.TransformedDistribution(base_dist, [transform]).to_event(1))
if self.fixed_values:
x = zeros(runs, self.agent.npar)
x[:, self.locs['fixed']] = self.values
x[:, self.locs['free']] = locs
else:
x = locs
self.agent.set_parameters(x)
for b in range(self.nb):
for t in range(self.nt):
# update single trial
offers = self.stimulus['offers'][b, t]
self.agent.planning(b, t, offers)
outcomes = self.stimulus['outcomes'][b, t]
responses = self.responses[b, t]
mask = self.stimulus['mask'][b, t]
self.agent.update_beliefs(b, t, [responses, outcomes], mask=mask)
logits = self.agent.logits[-1]
sample('obs_{}_{}'.format(b, t),
dist.Categorical(logits=logits).mask(self.notnans[b, t]),
obs=responses)
    def guide(self):
        """Approximate posterior for the horseshoe prior. We assume a posterior in the form
        of a multivariate normal distribution for the global mean and standard deviation
        and a multivariate normal distribution for the parameters of each subject independently.
"""
nsub = self.runs # number of subjects
npar = self.npar # number of parameters
trns = biject_to(constraints.positive)
m_hyp = param('m_hyp', zeros(2 * npar))
st_hyp = param('scale_tril_hyp', torch.eye(2 * npar), constraint=constraints.lower_cholesky)
hyp = sample('hyp',
dist.MultivariateNormal(m_hyp, scale_tril=st_hyp),
infer={'is_auxiliary': True})
unc_mu = hyp[..., :npar]
unc_tau = hyp[..., npar:]
c_tau = trns(unc_tau)
ld_tau = trns.inv.log_abs_det_jacobian(c_tau, unc_tau)
ld_tau = sum_rightmost(ld_tau, ld_tau.dim() - c_tau.dim() + 1)
sample("mu", dist.Delta(unc_mu, event_dim=1))
sample("tau", dist.Delta(c_tau, log_density=ld_tau, event_dim=1))
m_locs = param('m_locs', zeros(nsub, npar))
st_locs = param('scale_tril_locs',
torch.eye(npar).repeat(nsub, 1, 1),
constraint=constraints.lower_cholesky)
with plate('runs', nsub):
sample("locs", dist.MultivariateNormal(m_locs, scale_tril=st_locs))
def _get_quantiles(self, quantiles):
"""
        Returns posterior quantiles for each latent variable.
:param quantiles: A list of requested quantiles between 0 and 1.
:type quantiles: torch.Tensor or list
:return: A dict mapping sample site name to a list of quantile values.
:rtype: dict
"""
self.means = [param('m_locs'), param('m_hyp')]
self.stds = [param('scale_tril_hyp'), param('scale_tril_locs')]
quantiles = torch.tensor(quantiles).reshape(1, 3)
m_locs = param('m_locs').reshape(-1, 1)
s_locs = param('scale_tril_locs').diagonal(dim1=-2, dim2=-1).reshape(-1, 1)
latents = dist.Normal(m_locs, s_locs).icdf(quantiles).reshape(self.runs, -1, 3)
result = {'locs': latents}
m_hyp = param('m_hyp').reshape(-1, 1)
s_hyp = param('scale_tril_hyp').diagonal(dim1=-2, dim2=-1).reshape(-1, 1)
latents = dist.Normal(m_hyp, s_hyp).icdf(quantiles).reshape(-1, 1)
result['mu'] = latents[:self.npar]
result['tau'] = latents[self.npar:].exp()
return result
class NormalGamma(Inferrer):
def __init__(self, agent, stimulus, responses, mask=None, fixed_params=None):
super(NormalGamma, self).__init__(agent, stimulus, responses, mask=mask, fixed_params=fixed_params)
def model(self):
"""
        Generative model of behavior with a hierarchical (normal-gamma)
prior over free model parameters.
"""
runs = self.runs # number of independent runs of experiment
npar = self.npar # number of parameters
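        # Informal sketch of the hierarchy sampled below (notation only;
        # k indexes free parameters, i indexes runs/subjects):
        #   tau_k   ~ Gamma(a_k, a_k / lam_k)        group-level precision
        #   sig_k   = 1 / sqrt(tau_k)
        #   mu_k    ~ Normal(m_k, s_k * sig_k)       group-level mean
        #   locs_ik ~ Normal(mu_k, sig_k)            subject-level parameter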
# define hyper priors over model parameters
a = param('a', ones(npar), constraint=constraints.positive)
lam = param('lam', ones(npar), constraint=constraints.positive)
tau = sample('tau', dist.Gamma(a, a/lam).to_event(1))
sig = 1/torch.sqrt(tau)
        # each model parameter has a hyperprior defining the group-level mean
m = param('m', zeros(npar))
s = param('s', ones(npar), constraint=constraints.positive)
mu = sample('mu', dist.Normal(m, s*sig).to_event(1))
        # define prior mean over model parameters and subjects
with plate('runs', runs):
base_dist = dist.Normal(0., 1.).expand_by([npar])
transform = dist.transforms.AffineTransform(mu, sig)
locs = sample('locs', dist.TransformedDistribution(base_dist, [transform]).to_event(1))
if self.fixed_values:
x = zeros(locs.shape[:-1] + (self.agent.npar,))
x[..., self.locs['fixed']] = self.values
x[..., self.locs['free']] = locs
else:
x = locs
self.agent.set_parameters(x)
for b in range(self.nb):
for t in range(self.nt):
# update single trial
offers = self.stimulus['offers'][b, t]
self.agent.planning(b, t, offers)
outcomes = self.stimulus['outcomes'][b, t]
responses = self.responses[b, t]
mask = self.stimulus['mask'][b, t]
self.agent.update_beliefs(b, t, [responses, outcomes], mask=mask)
mask = self.notnans[b, t]
logits = self.agent.logits[-1]
sample('obs_{}_{}'.format(b, t),
dist.Categorical(logits=logits).mask(mask),
obs=responses)
    def guide(self):
        """Approximate posterior for the normal-gamma prior. We assume a posterior in the form
        of a multivariate normal distribution for the global mean and standard deviation
        and a multivariate normal distribution for the parameters of each subject independently.
"""
nsub = self.runs # number of subjects
npar = self.npar # number of parameters
trns = biject_to(constraints.positive)
m_hyp = param('m_hyp', zeros(2*npar))
st_hyp = param('scale_tril_hyp',
torch.eye(2*npar),
constraint=constraints.lower_cholesky)
hyp = sample('hyp',
dist.MultivariateNormal(m_hyp, scale_tril=st_hyp),
infer={'is_auxiliary': True})
unc_mu = hyp[..., :npar]
unc_tau = hyp[..., npar:]
c_tau = trns(unc_tau)
ld_tau = trns.inv.log_abs_det_jacobian(c_tau, unc_tau)
ld_tau = sum_rightmost(ld_tau, ld_tau.dim() - c_tau.dim() + 1)
mu = sample("mu", dist.Delta(unc_mu, event_dim=1))
tau = sample("tau", dist.Delta(c_tau, log_density=ld_tau, event_dim=1))
m_locs = param('m_locs', zeros(nsub, npar))
st_locs = param('scale_tril_locs',
torch.eye(npar).repeat(nsub, 1, 1),
constraint=constraints.lower_cholesky)
with plate('runs', nsub):
locs = sample("locs", dist.MultivariateNormal(m_locs, scale_tril=st_locs))
return {'tau': tau, 'mu': mu, 'locs': locs}
def _get_quantiles(self, quantiles):
"""
        Returns posterior quantiles for each latent variable.
:param quantiles: A list of requested quantiles between 0 and 1.
:type quantiles: torch.Tensor or list
:return: A dict mapping sample site name to a list of quantile values.
:rtype: dict
"""
self.means = [param('m_locs'), param('m_hyp')]
self.stds = [param('scale_tril_hyp'), param('scale_tril_locs')]
quantiles = torch.tensor(quantiles).reshape(1, 3)
m_locs = param('m_locs').reshape(-1, 1)
s_locs = param('scale_tril_locs').diagonal(dim1=-2, dim2=-1).reshape(-1, 1)
latents = dist.Normal(m_locs, s_locs).icdf(quantiles).reshape(self.runs, -1, 3)
result = {'locs': latents}
m_hyp = param('m_hyp').reshape(-1, 1)
s_hyp = param('scale_tril_hyp').diagonal(dim1=-2, dim2=-1).reshape(-1, 1)
latents = dist.Normal(m_hyp, s_hyp).icdf(quantiles).reshape(-1, 1)
result['mu'] = latents[:self.npar]
result['tau'] = latents[self.npar:].exp()
return result
class NormalGammaHierarch(Inferrer):
def __init__(self, agent, stimulus, responses, mask=None, fixed_params=None, dim=10):
self.dim = dim
super(NormalGammaHierarch, self).__init__(agent, stimulus, responses, mask=mask, fixed_params=fixed_params)
def model(self):
"""
        Generative model of behavior with a hierarchical (normal-gamma)
prior over free model parameters.
"""
nsub = self.runs # number of independent runs of experiment
npar = self.npar # number of parameters
# define hyper priors over model parameters
a = param('a', ones(npar), constraint=constraints.positive)
lam = param('lam', ones(npar), constraint=constraints.positive)
tau = sample('tau', dist.Gamma(a, a/lam).to_event(1))
sig = 1/torch.sqrt(tau)
        # each model parameter has a hyperprior defining the group-level mean
m = param('m', zeros(npar))
s = param('s', ones(npar), constraint=constraints.positive)
mu = sample('mu', dist.Normal(m, s*sig).to_event(1))
        # define prior mean over model parameters and subjects
with plate('runs', nsub):
base_dist = dist.Normal(0., 1.).expand_by([npar])
transform = dist.transforms.AffineTransform(mu, sig)
locs = sample('locs', dist.TransformedDistribution(base_dist, [transform]).to_event(1))
if self.fixed_values:
x = zeros(locs.shape[:-1] + (self.agent.npar,))
x[..., self.locs['fixed']] = self.values
x[..., self.locs['free']] = locs
else:
x = locs
self.agent.set_parameters(x)
for b in range(self.nb):
for t in range(self.nt):
# update single trial
offers = self.stimulus['offers'][b, t]
self.agent.planning(b, t, offers)
outcomes = self.stimulus['outcomes'][b, t]
responses = self.responses[b, t]
mask = self.stimulus['mask'][b, t]
self.agent.update_beliefs(b, t, [responses, outcomes], mask=mask)
mask = self.notnans[b, t]
logits = self.agent.logits[-1]
sample('obs_{}_{}'.format(b, t),
dist.Categorical(logits=logits).mask(mask),
obs=responses)
    def guide(self):
        """Approximate posterior for the normal-gamma prior. We assume a mixture of diagonal
        normal distributions for the group-level mean and standard deviation and a multivariate
        normal distribution for the parameters of each subject independently.
"""
nsub = self.runs # number of subjects
npar = self.npar # number of parameters
dim = self.dim # number of classes
trns = biject_to(constraints.positive)
logits = param('logits', zeros(dim))
m_hyp = param('m_hyp', .1*torch.randn(dim, 2*npar))
std_hyp = param('scale_tril_hyp', ones(dim, 2*npar), constraint=constraints.positive)
hyp = sample('hyp', dist.MixtureOfDiagNormals(m_hyp, std_hyp, logits), infer={'is_auxiliary': True})
unc_mu = hyp[..., :npar]
unc_tau = hyp[..., npar:]
c_tau = trns(unc_tau)
ld_tau = trns.inv.log_abs_det_jacobian(c_tau, unc_tau)
ld_tau = sum_rightmost(ld_tau, ld_tau.dim() - c_tau.dim() + 1)
sample("mu", dist.Delta(unc_mu, event_dim=1))
sample("tau", dist.Delta(c_tau, log_density=ld_tau, event_dim=1))
m_locs = param('m_locs', zeros(nsub, npar))
st_locs = param('scale_tril_locs',
torch.eye(npar).repeat(nsub, 1, 1),
constraint=constraints.lower_cholesky)
with plate('runs', nsub):
sample("locs", dist.MultivariateNormal(m_locs, scale_tril=st_locs))
def _get_quantiles(self, quantiles):
"""
        Returns posterior quantiles for each latent variable.
:param quantiles: A list of requested quantiles between 0 and 1.
:type quantiles: torch.Tensor or list
:return: A dict mapping sample site name to a list of quantile values.
:rtype: dict
"""
self.means = [param('m_locs'), param('m_hyp')]
self.stds = [param('scale_tril_hyp'), param('scale_tril_locs')]
quantiles = torch.tensor(quantiles).reshape(1, 3)
m_locs = param('m_locs').reshape(-1, 1)
s_locs = param('scale_tril_locs').diagonal(dim1=-2, dim2=-1).reshape(-1, 1)
latents = dist.Normal(m_locs, s_locs).icdf(quantiles).reshape(self.runs, -1, 3)
result = {'locs': latents}
m_hyp = param('m_hyp').reshape(-1, 1)
s_hyp = param('scale_tril_hyp').diagonal(dim1=-2, dim2=-1).reshape(-1, 1)
latents = dist.Normal(m_hyp, s_hyp).icdf(quantiles).reshape(-1, 1)
result['mu'] = latents[:self.npar]
result['tau'] = latents[self.npar:].exp()
return result | mit |
lordkman/burnman | examples/example_perplex.py | 5 | 2010 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_perplex
---------------
This minimal example demonstrates how burnman can be used to read and interrogate
a PerpleX tab file (as produced by burnman/misc/create_burnman_readable_perplex_table.py).
It also demonstrates how we can smooth a given property on a given P-T grid.
*Uses:*
* :doc:`PerplexMaterial`
* :func:`burnman.material.Material.evaluate`
* :func:`burnman.tools.smooth_array`
*Demonstrates:*
* Use of PerplexMaterial
* Smoothing gridded properties
"""
import sys
import os
import numpy as np
sys.path.insert(1, os.path.abspath('..'))
import burnman
import matplotlib.pyplot as plt
rock = burnman.PerplexMaterial('../burnman/data/input_perplex/in23_1.tab')
P = 1.e9
T = 1650.
rock.set_state(P, T)
print('P: {0:.1f} GPa, T: {1:.1f} K, density: {2:.1f} kg/m^3'.format(P/1.e9, T, rock.rho))
pressures = np.linspace(10.e9, 25.e9, 151)
temperatures = [T] * len(pressures)
densities = rock.evaluate(['rho'], pressures, temperatures)[0]
plt.plot(pressures/1.e9, densities)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Density (kg/m^3)')
plt.show()
pressures = np.linspace(10.e9, 25.e9, 151)
temperatures = np.linspace(1600., 1800., 3)
T = 1650.
entropies = rock.evaluate(['S'], pressures, np.array([T] * len(pressures)))[0]
smoothed_entropies = burnman.tools.smooth_array(array=entropies,
grid_spacing=np.array([pressures[1] - pressures[0]]),
gaussian_rms_widths = np.array([5.e8]))
plt.plot(pressures/1.e9, entropies, label='entropies')
plt.plot(pressures/1.e9, smoothed_entropies, label='smoothed entropies')
plt.xlabel('Pressure (GPa)')
plt.ylabel('Entropy (J/K/mol)')
plt.legend(loc='upper right')
plt.show()
| gpl-2.0 |
bthirion/scikit-learn | examples/svm/plot_rbf_parameters.py | 20 | 8048 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
jjsalomon/python-analytics | pandas2 - Reading & Writing Data/pandas7 - Pickle - Python Object Serialization.py | 1 | 1389 | # -*- coding: utf-8 -*-
"""
Created on Sun May 28 21:57:12 2017
@author: azkei
The pickle module implements a powerful algorithm for serialization and de-serialization
of a data structure implemented in Python. Pickling is the process in which the
hierarchy of an object is converted into a stream of bytes.
This allows an object to be transmitted, stored and then rebuilt by the receiver itself,
retaining all the original features.
We're going to be using the cPickle module to do this, as it was written in C and heavily
optimized.
"""
# 1. Serialize a Python Object with cPickle
# Import cPickle, now called _pickle
import _pickle as pickle
import numpy as np
import pandas as pd
# Create sufficient data
data = {'color':['white','red'],'value':[5,7]}
# Perform serialization of the data through the dumps() function
pickled_data = pickle.dumps(data)
print(pickled_data)
# Once the data has been serialized, it can be written to a file or sent over a socket, etc.
# Once it has been transmitted, it is possible to reconstruct the serialized object
# using the loads() function
nframe = pickle.loads(pickled_data)
nframe
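# As a quick illustration (the file name 'data.pkl' is just an example), the
# same bytes can also be written to disk with dump() and read back with load()
f = open('data.pkl','wb')
pickle.dump(data, f)
f.close()
f = open('data.pkl','rb')
data_from_file = pickle.load(f)
f.close()
data_from_file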
# 2. Pickling with pandas.
import pandas as pd
import numpy as np
frame = pd.DataFrame(np.arange(16).reshape(4,4),
index=['up','down','left','right'])
frame.to_pickle('frame.pkl')
# To open the pkl file
pd.read_pickle('frame.pkl')
# NOTE: Make sure the data doesn't contain anything malicious, as pickle
# was not designed to be safe. | mit |
jougs/nest-simulator | pynest/examples/one_neuron.py | 1 | 3809 | # -*- coding: utf-8 -*-
#
# one_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron example
------------------
This script simulates a neuron driven by a constant external current
and records its membrane potential.
See Also
~~~~~~~~
:doc:`twoneurons`
"""
#######################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages and reset the kernel.
# Resetting the kernel allows you to execute the script several
# times in a Python shell without interferences from previous NEST
# simulations. Thus, without resetting the kernel the network status
# including connections between nodes, status of neurons, devices and
# intrinsic time clocks, is kept and influences the next simulations.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
#######################################################################
# Second, the nodes (neurons and devices) are created using ``Create``.
# We store the returned handles in variables for later reference.
# The ``Create`` function also allows you to create multiple nodes,
# e.g. ``nest.Create('iaf_psc_alpha', 5)``.
# The default parameters of the model can also be configured through
# ``Create`` by including a list of parameter dictionaries,
# e.g. ``nest.Create("iaf_psc_alpha", params=[{'I_e': 376.0}])``.
# In this example we will configure these parameters in an additional
# step, which is explained in the third section.
neuron = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
#######################################################################
# Third, the neuron is configured by setting its parameters directly on the
# node handle returned by ``Create``. In this example we set the constant
# external current ``I_e`` that drives the neuron (see the commented
# ``SetStatus`` alternative below).
neuron.I_e = 376.0
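# The same parameter could also be set with ``SetStatus``, which takes the
# node handle(s) and a parameter dictionary; shown here only for comparison:
#
#     nest.SetStatus(neuron, {"I_e": 376.0})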
#######################################################################
# Fourth, the neuron is connected to the voltmeter. The command
# ``Connect`` has different variants. Plain ``Connect`` just takes the
# handles of pre- and post-synaptic nodes and uses the default values
# for weight and delay. Note that the connection direction for the voltmeter is
# reversed compared to the spike recorder, because it observes the
# neuron instead of receiving events from it. Thus, ``Connect``
# reflects the direction of signal flow in the simulation kernel
# rather than the physical process of inserting an electrode into the
# neuron. The latter semantics is presently not available in NEST.
nest.Connect(voltmeter, neuron)
#######################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
#######################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time and display the plot using pyplot.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
zorroblue/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 50 | 2007 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
jejjohnson/manifold_learning | src/python/ssma_demo.py | 1 | 2339 | from data.data_generation import generate_gaussian
from manifold_alignment.ssma import ManifoldAlignment
import numpy as np
from utils.classification_list import lda_pred, accuracy_stats
import matplotlib.pyplot as plt
# my test function to see if it works. Should be in the 80s range of
# accuracy
def test_ma_gaussian(ma_method='wang', n_components=2, plot=False):
# define some dictionaries with empty labeled lists
X ={}; Y={};
X['label'] = []; X['unlabel'] = []; X['test'] = []
Y['label'] = []; Y['unlabel'] = []; Y['test'] = []
# assign labels from gaussian dataset
X1, X2, XT1, XT2, \
Y1, Y2, YT1, YT2, \
U1, U2 = generate_gaussian(plot_data=plot)
# create appropriate data structures based off of
# the manifold alignment class criteria
X['label'] = [X1, X2]
X['unlabel'] = [U1, U2]
X['test'] = [XT1, XT2]
Y['label'] = [Y1 , Y2]
Y['test'] = [YT1, YT2]
print(np.shape(X['label'][0]), np.shape(Y['label'][0]))
print(np.shape(X['unlabel'][0]))
print(np.shape(X['test'][0]), np.shape(Y['test'][0]))
print(np.shape(X['label'][1]), np.shape(Y['label'][1]))
print(np.shape(X['unlabel'][1]))
print(np.shape(X['test'][1]), np.shape(Y['test'][1]))
ma_method = ManifoldAlignment(ma_method=ma_method,
lap_method='personal')
ma_method.fit(X,Y)
Xproj = ma_method.transform(X, n_components=n_components)
Y['pred'] = lda_pred(Xproj['train'],
Xproj['test'],
Y['label'],
Y['test'])
Acc_stats = accuracy_stats(Y['pred'], Y['test'])
Lg = ma_method.L_g
Vs = ma_method.V_s
Vd = ma_method.V_d
fig, ax = plt.subplots(nrows=3, ncols=1,
figsize=(10,10))
ax[0].spy(Lg, precision=1E-5, markersize=.2)
ax[0].set_title('Geometric Laplacian')
ax[1].spy(Vs, precision=1E-5, markersize=.2)
ax[1].set_title('Similarity Potential')
ax[2].spy(Vd, precision=1E-5, markersize=.2)
ax[2].set_title('Dissimilarity Potential')
plt.show()
print('AA - Domain 1: {s}'.format(s=Acc_stats['AA'][0]))
print('AA - Domain 2: {s}'.format(s=Acc_stats['AA'][1]))
if __name__ == "__main__":
test_ma_gaussian(ma_method='ssma', n_components=3, plot=True)
| mit |
DEVELByte/incubator-airflow | setup.py | 1 | 8767 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
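For illustration only (the shas below are made up): a clean checkout of a release
tag yields '.release:{version}+abc1234', a clean untagged commit yields
'.dev0+abc1234', and a working tree with uncommitted changes yields
'.dev0+abc1234.dirty'.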
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warn('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warn('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
crypto = ['cryptography>=0.9.3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.8',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = ['lxml>=3.3.4', 'nose', 'nose-parameterized', 'mock', 'click', 'jira', 'moto', 'freezegun']
devel_minreq = devel + mysql + doc + password + s3
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
def do_setup():
write_version()
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'croniter>=0.3.8, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-wtf==0.12',
'funcsigs>=1.0.2, <1.1',
'future>=0.15.0, <0.16',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <3.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
'lxml>=3.6.0, <4.0',
'unicodecsv>=0.14.1',  # used in the hive_hook.py
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'celery': celery,
'cloudant': cloudant,
'crypto': crypto,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'emr': emr,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/apache/incubator-airflow',
download_url=(
'https://github.com/apache/incubator-airflow/tarball/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
LiaoPan/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/sparse/test_array.py | 9 | 30556 | from pandas.compat import range
import re
import operator
import pytest
import warnings
from numpy import nan
import numpy as np
from pandas import _np_version_under1p8
from pandas.core.sparse.api import SparseArray, SparseSeries
from pandas._libs.sparse import IntIndex
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
class TestSparseArray(object):
def setup_method(self, method):
self.arr_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == np.float64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == np.object
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == np.object
assert arr.fill_value == 'A'
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
tm.assert_sp_array_equal(arr, SparseArray([np.nan, 1, 2, np.nan]))
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
# scalar input
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_sparseseries_roundtrip(self):
# GH 13999
for kind in ['integer', 'block']:
for fill in [1, np.nan, 0]:
arr = SparseArray([np.nan, 1, np.nan, 2, 3], kind=kind,
fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
for fill in [True, False, np.nan]:
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[11])
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[-11])
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take(self):
assert np.isnan(self.arr.take(0))
assert np.isscalar(self.arr.take(2))
# np.take in < 1.8 doesn't support scalar indexing
if not _np_version_under1p8:
assert self.arr.take(2) == np.take(self.arr_data, 2)
assert self.arr.take(6) == np.take(self.arr_data, 6)
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
def test_bad_take(self):
tm.assert_raises_regex(
IndexError, "bounds", lambda: self.arr.take(11))
pytest.raises(IndexError, lambda: self.arr.take(-11))
def test_take_invalid_kwargs(self):
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, self.arr.take,
[2, 3], foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, self.arr.take,
[2, 3], out=self.arr)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, self.arr.take,
[2, 3], mode='clip')
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([0, np.nan, 0], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
sparse.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
tm.assert_raises_regex(TypeError, "item assignment", setitem)
tm.assert_raises_regex(TypeError, "item assignment", setslice)
def test_constructor_from_too_large_array(self):
tm.assert_raises_regex(TypeError, "expected dimension <= 1 data",
SparseArray, np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == bool
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
for dense in [arr.to_dense(), arr.values]:
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == np.bool
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == np.bool
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == np.bool
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == np.float32
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
for dense in [arr.to_dense(), arr.values]:
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
res = self.arr.astype('f8')
res.sp_values[:3] = 27
assert not (self.arr.sp_values[:3] == 27).any()
msg = "unable to coerce current fill_value nan to int64 dtype"
with tm.assert_raises_regex(ValueError, msg):
self.arr.astype('i8')
arr = SparseArray([0, np.nan, 0, 1])
with tm.assert_raises_regex(ValueError, msg):
arr.astype('i8')
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer'
with tm.assert_raises_regex(ValueError, msg):
arr.astype('i8')
def test_astype_all(self):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
types = [np.float64, np.float32, np.int64,
np.int32, np.int16, np.int8]
for typ in types:
res = arr.astype(typ)
assert res.dtype == typ
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(res.values, vals.astype(typ))
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# coerces to int
msg = "unable to set fill_value 3\\.1 to int64 dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = 3.1
msg = "unable to set fill_value nan to int64 dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = np.nan
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
msg = "unable to set fill_value 0 to bool dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = 0
msg = "unable to set fill_value nan to bool dtype"
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = np.nan
# invalid
msg = "fill_value must be a scalar"
for val in [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]:
with tm.assert_raises_regex(ValueError, msg):
arr.fill_value = val
def test_copy_shallow(self):
arr2 = self.arr.copy(deep=False)
def _get_base(values):
base = values.base
while base.base is not None:
base = base.base
return base
assert (_get_base(arr2) is _get_base(self.arr))
def test_values_asarray(self):
assert_almost_equal(self.arr.values, self.arr_data)
assert_almost_equal(self.arr.to_dense(), self.arr_data)
assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))
def test_to_dense(self):
vals = np.array([1, np.nan, np.nan, 3, np.nan])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
vals = np.array([1, np.nan, 0, 3, 0])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
vals = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseArray(vals).to_dense()
tm.assert_numpy_array_equal(res, vals)
res = SparseArray(vals, fill_value=0).to_dense()
tm.assert_numpy_array_equal(res, vals)
# see gh-14647
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
SparseArray(vals).to_dense(fill=2)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.values[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.values[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.values[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.values[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_binary_operators(self):
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.values, second.values),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.values, exp.values)
res2 = op(first, second.values)
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.values, second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# ignore this if the actual op raises (e.g. pow)
try:
exp = op(first.values, 4)
exp_fv = op(first.fill_value, 4)
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.values, exp)
except ValueError:
pass
def _check_inplace_op(op):
tmp = arr1.copy()
pytest.raises(NotImplementedError, op, tmp, arr2)
with np.errstate(all='ignore'):
bin_ops = [operator.add, operator.sub, operator.mul,
operator.truediv, operator.floordiv, operator.pow]
for op in bin_ops:
_check_op(op, arr1, arr2)
_check_op(op, farr1, farr2)
inplace_ops = ['iadd', 'isub', 'imul', 'itruediv', 'ifloordiv',
'ipow']
for op in inplace_ops:
_check_inplace_op(getattr(operator, op))
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0., 0., 0., 0.])
res = s.fillna(-1)
exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == np.int64
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == np.int64
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == np.int64
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
class TestSparseArrayAnalytics(object):
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.sum,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.sum,
SparseArray(data), out=out)
def test_cumsum(self):
non_null_data = np.array([1, 2, 3, 4, 5], dtype=float)
non_null_expected = SparseArray(non_null_data.cumsum())
null_data = np.array([1, 2, np.nan, 4, 5], dtype=float)
null_expected = SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0]))
for data, expected in [
(null_data, null_expected),
(non_null_data, non_null_expected)
]:
out = SparseArray(data).cumsum()
tm.assert_sp_array_equal(out, expected)
out = SparseArray(data, fill_value=np.nan).cumsum()
tm.assert_sp_array_equal(out, expected)
out = SparseArray(data, fill_value=2).cumsum()
tm.assert_sp_array_equal(out, expected)
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with tm.assert_raises_regex(ValueError, msg):
SparseArray(data).cumsum(axis=axis)
def test_numpy_cumsum(self):
non_null_data = np.array([1, 2, 3, 4, 5], dtype=float)
non_null_expected = SparseArray(non_null_data.cumsum())
null_data = np.array([1, 2, np.nan, 4, 5], dtype=float)
null_expected = SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0]))
for data, expected in [
(null_data, null_expected),
(non_null_data, non_null_expected)
]:
out = np.cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = np.cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = np.cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
SparseArray(data), out=out)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.mean,
SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.mean,
SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
| mit |
saiwing-yeung/scikit-learn | sklearn/preprocessing/tests/test_label.py | 34 | 18227 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
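# A minimal usage sketch (illustrative only, not collected as a test; it relies
# on the label_binarize / numpy imports already present in this module). It
# mirrors the binary expectation checked in test_label_binarize_binary: with
# two classes the output collapses to a single column for the positive class,
# filled with pos_label / neg_label.
def _label_binarize_sketch():
    out = label_binarize([0, 1, 0], classes=[0, 1], neg_label=-1, pos_label=2)
    assert_array_equal(out, np.array([[-1], [2], [-1]]))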
| bsd-3-clause |
nmartensen/pandas | pandas/tests/frame/test_rank.py | 5 | 9430 | # -*- coding: utf-8 -*-
import pytest
from datetime import timedelta, datetime
from distutils.version import LooseVersion
from numpy import nan
import numpy as np
from pandas import Series, DataFrame
from pandas.compat import product
from pandas.util.testing import assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestRank(TestData):
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
df = DataFrame({'A': s, 'B': s})
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
}
def test_rank(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
tm.assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
tm.assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False, ascending=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1., nan, 2.],
[2., 1., 3.]])
result = df.rank(1, numeric_only=False, ascending=False)
tm.assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
tm.assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
tm.assert_numpy_array_equal(ranks0.values, exp0)
tm.assert_numpy_array_equal(ranks1.values, exp1)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = DataFrame([[2, 1], [4, 3]])
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_rank_methods_frame(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
import scipy
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ['average', 'min', 'max', 'first', 'dense']:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(
rankdata, ax, vals,
m if m != 'first' else 'ordinal')
sprank = sprank.astype(np.float64)
expected = DataFrame(sprank, columns=cols)
if LooseVersion(scipy.__version__) >= '0.17.0':
expected = expected.astype('float64')
tm.assert_frame_equal(result, expected)
def test_rank_descending(self):
dtypes = ['O', 'f8', 'i8']
for dtype, method in product(dtypes, self.results):
if 'i' in dtype:
df = self.df.dropna()
else:
df = self.df.astype(dtype)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
assert_frame_equal(res, expected)
if method == 'first' and dtype == 'O':
continue
expected = (df.max() - df).rank(method=method)
if dtype != 'O':
res2 = df.rank(method=method, ascending=False,
numeric_only=True)
assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False,
numeric_only=False)
assert_frame_equal(res3, expected)
def test_rank_2d_tie_methods(self):
df = self.df
def _check2d(df, expected, method='average', axis=0):
exp_df = DataFrame({'A': expected, 'B': expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
assert_frame_equal(result, exp_df)
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, axis, dtype in product(results, [0, 1], dtypes):
if (dtype, method) in disabled:
continue
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, results[method], method=method, axis=axis)
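# Illustrative sketch (not collected as a test): pct=True rescales each rank by
# the number of ranked values along the axis, which is exactly the behaviour
# exercised in test_rank2 above with a 2x3 frame.
def _rank_pct_sketch():
    df = DataFrame([[1, 3, 2], [1, 2, 3]])
    assert_frame_equal(df.rank(1, pct=True), df.rank(1) / 3.0)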
| bsd-3-clause |
HyukjinKwon/spark | python/pyspark/pandas/indexes/__init__.py | 16 | 1065 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.pandas.indexes.base import Index # noqa: F401
from pyspark.pandas.indexes.datetimes import DatetimeIndex # noqa: F401
from pyspark.pandas.indexes.multi import MultiIndex # noqa: F401
from pyspark.pandas.indexes.numeric import Float64Index, Int64Index # noqa: F401
| apache-2.0 |
QuantumFractal/hackisu-2017-nucleus | conv-ae/preprocess.py | 1 | 1669 | import matplotlib.pyplot as plt
import numpy as np
from patcher import Patcher
import os
import random
from sklearn.feature_extraction import image
import h5py
import gc
import sys
#dataset = sys.argv[1]
savename = sys.argv[1]
#data_path = 'C:\\nucleus\\' + dataset
data_path = 'C:\\Users\\micha\\Pictures\\Kinect_Capture_005450'
patch_size = 200
imgs = os.listdir(data_path)
x_train = None
x_train_tmp = []
y_train = None
y_train_tmp = []
first = True
# Patching data
for train_img in imgs:
if train_img.endswith('d.jpg'):
continue
train_lbl = train_img.replace('c.jpg', 'd.jpg')
print('{} -> {}'.format(train_img, train_lbl))
train_img = data_path + '\\' + train_img
train_lbl = data_path + '\\' + train_lbl
patcher = Patcher.from_image(train_img, train_lbl, _dim=(patch_size, patch_size), _stride=(128, 128))
patches, labels = patcher.patchify()
x_train_tmp = x_train_tmp + patches
y_train_tmp = y_train_tmp + labels
# if len(x_train_tmp) > 1:
# if first:
# x_train = np.array(x_train_tmp)
# y_train = np.array(y_train_tmp)
# first = False
# else:
# x_train = np.concatenate((x_train, np.array(x_train_tmp)))
# y_train = np.concatenate((y_train, np.array(y_train_tmp)))
# x_train_tmp = []
# y_train_tmp = []
patcher = None
gc.collect()
x_train = np.array(x_train_tmp)
y_train = np.array(y_train_tmp)
with h5py.File(savename, "w") as f:
f.create_dataset('x_train', data=x_train)
f.create_dataset('y_train', data=y_train)
| mit |
tdopires/forest-cover-group6 | analysis/visualize.py | 1 | 5749 | # Some code for visualization of the raw data.
import pandas as pd
from sklearn import ensemble
import pylab
import math
def plot_scatter_all(df_train, columns):
"""
    Makes scatter plots of all combinations of quantitative features.
"""
# Get a feature for the x-axis
for i in xrange(1,11):
count = 0
# Make subplots for all other features.
for j in xrange(1,11):
if i != j:
x = df_train[columns[i]]
y = df_train[columns[j]]
z = df_train['Cover_Type']
# Make a subplot on position of count.
count += 1
pylab.subplot(3,3,count)
pylab.scatter(x,y, c=z)
pylab.xlabel(columns[i])
pylab.ylabel(columns[j])
# Show plot.
pylab.suptitle("Scatter plots of " + columns[i])
pylab.show()
def plot_hist(df_train, columns):
"""
    Makes histograms of all quantitative features.
"""
for i in xrange(1,11):
type_1 = []
type_2 = []
type_3 = []
type_4 = []
type_5 = []
type_6 = []
type_7 = []
# Make different variables for all the different cover types.
for j in xrange(len(df_train['Cover_Type'])):
cover_type = df_train['Cover_Type'][j]
instance = df_train[columns[i]][j]
if cover_type == 1:
type_1.append(instance)
elif cover_type == 2:
type_2.append(instance)
elif cover_type == 3:
type_3.append(instance)
elif cover_type == 4:
type_4.append(instance)
elif cover_type == 5:
type_5.append(instance)
elif cover_type == 6:
type_6.append(instance)
elif cover_type == 7:
type_7.append(instance)
names = ["Spruce/Fir", "Lodgepole Pine", "Ponderosa Pine", "Cottonwood/Willow", "Aspen", "Douglas-fir", "Krummholz"]
colors = ["navy", "lime", "saddlebrown", "royalblue", "darkred", "darkorange", "forestgreen"]
# Plot the histogram.
pylab.hist([type_1, type_2, type_3, type_4, type_5, type_6, type_7], 50, label=names, stacked=True, color=colors)
pylab.xlabel(columns[i])
pylab.ylabel("Frequency")
pylab.legend()
pylab.show()
def entropy1(df_train, columns, n_bins):
"""
    Calculates entropy of quantitative features.
n_bins: number of bins
"""
for i in xrange(1,11):
# Get feature and zip it with cover type.
feature = df_train[columns[i]]
cover_type = df_train['Cover_Type']
together = zip(feature,cover_type)
sorted_together = sorted(together)
# Split in even bins.
bin = 15120/float(n_bins)
splitted_together = split(sorted_together, int(bin))
# For each bin, get the entropy.
total_entropy = []
for j in xrange(0,n_bins):
# Get cover_types in bin and calculate probability.
cover_type_bin = [x[1] for x in splitted_together[j]]
probs = []
for value in xrange(1,8):
prob = cover_type_bin.count(value)/bin
probs.append(prob)
# Calculate entropy.
entropy_bin = []
for p in probs:
if p == 0.0:
ent = 0
else:
ent = -p*math.log(p,2)
entropy_bin.append(ent)
# Correct for number of bins.
total_entropy.append((1/float(n_bins))*sum(entropy_bin))
print sum(total_entropy), columns[i]
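# Illustrative helper (not called above): the per-bin quantity accumulated in
# entropy1/entropy2 is the Shannon entropy -sum(p * log2(p)) over the seven
# cover types. For a uniform distribution over 7 classes it reaches its
# maximum, log2(7) ~= 2.807.
def shannon_entropy(probs):
    return sum(-p * math.log(p, 2) for p in probs if p > 0.0)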
def entropy2(df_train, columns):
"""
Calculates entropy of the two categorical features.
"""
for i in xrange(11,13):
# Get feature and zip it with cover type.
feature = df_train[columns[i]]
cover_type = df_train['Cover_Type']
together = zip(feature,cover_type)
sorted_together = sorted(together)
# Split based on possible values.
bins = []
for val in xrange(1,(max(feature)+1)):
bin = [x for x in together if x[0] == val]
bins.append(bin)
# For each bin, get the entropy.
total_entropy = []
for j in xrange(0,len(bins)):
# Get cover_types in bin and calculate probability.
cover_type_bin = [x[1] for x in bins[j]]
# Set 1.0 in case of empty bin.
if len(cover_type_bin) == 0:
length = 1.0
else:
length = float(len(cover_type_bin))
probs = []
for value in xrange(1,8):
prob = cover_type_bin.count(value)/length
probs.append(prob)
# Calculate entropy.
entropy_bin = []
for p in probs:
if p == 0.0:
ent = 0
else:
ent = -p*math.log(p,2)
entropy_bin.append(ent)
# Correct for number of bins.
total_entropy.append((1/float(len(bins)))*sum(entropy_bin))
print sum(total_entropy), columns[i]
def split(l,n):
"""
    Splits a list into even bins.
"""
return [l[i:i+n] for i in xrange(0, 15120, n)]
if __name__ == "__main__":
loc_train = "../data/train_normalised.csv"
# Read data and get columns.
df_train = pd.read_csv(loc_train)
columns = df_train.columns
#plot_scatter_all(df_train, columns)
#plot_hist(df_train, columns)
entropy1(df_train, columns, 14)
entropy2(df_train, columns)
| apache-2.0 |
will-cromar/needy | ProofOfConcept/sandbox.py | 1 | 1501 | from price_parsing import *
from regression_models import *
from regression_graphs import *
from util import *
from sklearn import tree, svm
#Will's example code
#Testing code. Run this once to make sure it works
def doesItWork():
obj1 = "Less recent"
obj2 = "More recent"
savePickle(obj1, "stringtest")
time.sleep(20)
savePickle(obj2, "stringtest")
print getMostRecentPickle("stringtest")
def test():
data = getStockPrices("GOOG", frequency="daily")
times, prices = preprocessStocks(data)
dataset = Dataset(times, prices, "The GOOG", graphColor="k", mode="sklearn")
min_samples = len(times) * .025
regs = [("Decision tree", tree.DecisionTreeRegressor(min_samples_leaf=min_samples), "r")]
#("Ouija board", svm.SVR(kernel="poly"), "g")]
regressions = runRegressions(regs, times, prices)
graphRegressionsOverTime(dataset, *regressions)
def test2():
data = getStockPrices("GOOG", frequency="daily")
times, prices = preprocessStocks(data)
dataset = Dataset(times, prices, "The GOOG", graphColor="k", mode="sklearn")
graphRegressionsOverTime(dataset)
def recentTrendTest(ticker):
samples = 50
lookback = 4 * samples
data = getStockPrices(ticker, frequency="daily")
dates, prices = preprocessStocks(data[-lookback:])
dataset = Dataset(dates, prices, mode="preformatted")
recentTrend = graphRecentTrend(dataset, samples)
graphRegressionsOverTime(ticker, dataset, recentTrend, title="")
| mit |
lcharleux/compmod-doc | doc/example_code/models/ring_compression_opti.py | 1 | 5138 | # SOME OPTIMIZATION WITH RING COMPRESSION
from abapy.materials import Hollomon
from compmod.models import RingCompression
from scipy import interpolate
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import numpy as np
import pickle, copy
import platform
node = platform.node()
# FIXED PARAMETERS
settings = {}
settings['file_name'] = 'test_exp.txt'
settings['inner_radius'], settings['outer_radius'] = 45.18 , 50.36
settings['Nt'], settings['Nr'] = 80, 8
settings['displacement'] = 45.
settings['nFrames'] = 100
settings['E'] = 74.e3
settings['nu'] = .3
settings['iteration'] = 15
settings['thickness'] = 20.02
if node == 'lcharleux':
abqlauncher = '/opt/Abaqus/6.9/Commands/abaqus' # Local machine configuration
workdir = "workdir/"
if node == 'epua-pd47':
abqlauncher = 'C:/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe' # Local machine configuration
workdir = "D:/Simulations/Dossier_travail_Abaqus/"
label = "ringCompressionOpti"
elType = "CPE4"
cpus = 6
def read_file(file_name):
'''
  Read a two-column data file and convert it to numbers
'''
f = open(file_name, 'r') # Opening the file
lignes = f.readlines() # Reads all lines one by one and stores them in a list
f.close() # Closing the file
  # lignes.pop(0) # Delete the line break for each line
force_exp, disp_exp = [],[]
for ligne in lignes:
data = ligne.split() # Lines are splitted
disp_exp.append(float(data[0]))
force_exp.append(float(data[1]))
return -np.array(disp_exp), -np.array(force_exp)
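# Sketch of the expected input layout (inferred from read_file above; the file
# name used here is arbitrary): each line holds two whitespace-separated
# columns, displacement then force. Both columns are negated on return, so
# negative (compressive) values in the file come back positive.
def _read_file_demo():
  f = open('read_file_demo.txt', 'w')
  f.write('0.0 0.0\n-1.0 -50.0\n')
  f.close()
  disp, force = read_file('read_file_demo.txt')
  # disp -> array([ 0., 1.]), force -> array([ 0., 50.])
  return disp, force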
class Simulation(object):
def __init__(self, sy, n, settings):
self.sy = sy
self.n = n
self.settings = settings
def Run(self):
"""
    Runs a simulation for a given pair (sy, n) and returns the (disp, force) pair.
"""
#MODEL DEFINITION
sy = self.sy
n = self.n
E = self.settings['E']
nu = self.settings['nu']
inner_radius = self.settings['inner_radius']
outer_radius = self.settings['outer_radius']
disp = self.settings['displacement']/2.
nFrames = self.settings['nFrames']
Nr = self.settings['Nr']
Nt = self.settings['Nt']
thickness = self.settings['thickness']
print E, nu, sy, n
material = Hollomon(
labels = "SAMPLE_MAT",
E = E, nu = nu,
sy = sy, n = n)
m = RingCompression(
material = material ,
inner_radius = inner_radius,
outer_radius = outer_radius,
disp = disp,
nFrames = nFrames,
thickness = thickness,
Nr = Nr,
Nt = Nt,
workdir = workdir,
label = label,
elType = elType,
abqlauncher = abqlauncher,
cpus = cpus)
# SIMULATION
m.MakeMesh()
m.MakeInp()
m.Run()
m.PostProc()
outputs = m.outputs
force = -2. * outputs['history']['force']
disp = -2 * outputs['history']['disp']
self.disp = disp
self.force = force
def Interp(self):
"""
    Interpolate the force-displacement curve on a known grid
"""
disp, force = self.disp, self.force
f = interpolate.interp1d(disp.data[0], force.data[0])
return f
class Opti(object):
def __init__(self, sy0, n0, settings):
self.sy0 = sy0
self.n0 = n0
self.settings = settings
self.sy = []
self.n = []
self.err = []
self.force_sim = []
disp_exp, force_exp = read_file(self.settings['file_name'])
g = interpolate.interp1d(disp_exp, force_exp)
self.disp_exp = disp_exp
self.force_exp = force_exp
self.g = g
def Err(self, param):
"""
    Compute the residual error between the experimental and simulated curves
"""
sy = param[0]
n =param[1]
s = Simulation(sy, n ,self.settings)
s.Run()
f = s.Interp()
d = self.settings['displacement']
disp = np.linspace(0., d, 100)
force_sim = f(disp)
g = self.g
force_exp = g(disp)
err = np.sqrt(((force_exp - force_sim)**2).sum())
self.sy.append(sy)
self.n.append(n)
self.err.append(err)
self.force_sim.append(force_sim)
self.disp = disp
self.force_exp = force_exp
return err
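  # In words: Err interpolates the simulated and the experimental
  # force-displacement curves onto the same displacement grid and returns the
  # root of the summed squared differences,
  #   err = sqrt( sum_i (F_exp(d_i) - F_sim(d_i))**2 ),
  # which Optimize below minimises with the Nelder-Mead simplex method.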
def Optimize(self):
p0 = [self.sy0, self.n0]
result = minimize(self.Err, p0, method='nelder-mead', options={'disp':True, 'maxiter':settings['iteration']})
self.result = result
O = Opti(130., 0.1, settings)
O.Optimize()
fig = plt.figure('Load vs. disp')
plt.clf()
plt.plot(O.disp, O.force_exp, 'k-', label = 'experimental curve', linewidth = 2.)
plt.plot(O.disp, O.force_sim[0], 'g-', label = 'initial curve', linewidth = 2.)
a = O.err
index = np.argmin(a)
plt.plot(O.disp, O.force_sim[index], 'r-', label = 'optimized curve', linewidth = 2.)
for i in range(1, settings['iteration']):
plt.plot(O.disp, O.force_sim[i], 'b-', linewidth = .2)
#plt.plot(disp.data[1], force.data[1], 'b-', label = 'Unloading', linewidth = 2.)
plt.legend(loc="lower right")
plt.grid()
plt.xlabel('Displacement, $U$')
plt.ylabel('Force, $F$')
plt.savefig(workdir + label + '_load-vs-disp.pdf')
#print s.force.data[0]
"""
f = s.Interp()
x = np.arange(0., 49., 0.1)
print f(x)
"""
| gpl-2.0 |
kylerbrown/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
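# Small usage sketch (illustrative only; the sizes below are arbitrary): the
# helper returns the same random matrix twice, once dense and once as CSR, so
# the benchmark can feed either representation to the transformers.
def _demo_make_sparse_random_data():
    dense, sparse = make_sparse_random_data(n_samples=10, n_features=100,
                                            n_nonzeros=50, random_state=0)
    assert dense.shape == (10, 100)
    # duplicate (row, col) draws are merged during the COO -> CSR conversion
    assert sparse.nnz <= 50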
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
RayMick/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
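# Illustrative follow-up: the docstring notes that the Bayesian Ridge weights
# are shrunk toward zero relative to OLS; comparing the two weight-vector norms
# makes that visible on this synthetic dataset.
print("||w|| Bayesian Ridge: %.3f, OLS: %.3f"
      % (np.linalg.norm(clf.coef_), np.linalg.norm(ols.coef_)))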
| bsd-3-clause |
kc4271/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
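# Standalone sketch of the training-time augmentation implemented in
# __trim_borders above: draw a random crop offset inside the border and mirror
# the crop horizontally with 50% probability. It assumes a single image laid
# out as (colors, height, width); the batch layout used elsewhere in this file
# differs, so this is illustrative only.
def _random_crop_flip(img, inner_size):
    border = (img.shape[1] - inner_size) // 2
    start_y = nr.randint(0, 2 * border + 1)
    start_x = nr.randint(0, 2 * border + 1)
    crop = img[:, start_y:start_y + inner_size, start_x:start_x + inner_size]
    if nr.randint(2) == 0:  # flip with 50% probability
        crop = crop[:, :, ::-1]
    return crop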
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
reiinakano/scikit-plot | scikitplot/cluster.py | 1 | 4865 | """
The :mod:`scikitplot.cluster` module includes plots built specifically for
scikit-learn clusterer instances e.g. KMeans. You can use your own clusterers,
but these plots assume specific properties shared by scikit-learn estimators.
The specific requirements are documented per function.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.base import clone
from joblib import Parallel, delayed
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None, n_jobs=1,
show_cluster_time=True, ax=None, figsize=None,
title_fontsize="large", text_fontsize="medium"):
"""Plots elbow curve of different values of K for KMeans clustering.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
title (string, optional): Title of the generated plot. Defaults to
"Elbow Plot"
cluster_ranges (None or :obj:`list` of int, optional): List of
n_clusters for which to plot the explained variances. Defaults to
``range(1, 12, 2)``.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
show_cluster_time (bool, optional): Include plot of time it took to
cluster for a particular K.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> kmeans = KMeans(random_state=1)
>>> skplt.cluster.plot_elbow_curve(kmeans, cluster_ranges=range(1, 30))
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_elbow_curve.png
:align: center
:alt: Elbow Curve
"""
if cluster_ranges is None:
cluster_ranges = range(1, 12, 2)
else:
cluster_ranges = sorted(cluster_ranges)
if not hasattr(clf, 'n_clusters'):
raise TypeError('"n_clusters" attribute not in classifier. '
'Cannot plot elbow method.')
tuples = Parallel(n_jobs=n_jobs)(delayed(_clone_and_score_clusterer)
(clf, X, i) for i in cluster_ranges)
clfs, times = zip(*tuples)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(cluster_ranges, np.absolute(clfs), 'b*-')
ax.grid(True)
ax.set_xlabel('Number of clusters', fontsize=text_fontsize)
ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
if show_cluster_time:
ax2_color = 'green'
ax2 = ax.twinx()
ax2.plot(cluster_ranges, times, ':', alpha=0.75, color=ax2_color)
ax2.set_ylabel('Clustering duration (seconds)',
color=ax2_color, alpha=0.75,
fontsize=text_fontsize)
ax2.tick_params(colors=ax2_color, labelsize=text_fontsize)
return ax
def _clone_and_score_clusterer(clf, X, n_clusters):
"""Clones and scores clusterer instance.
Args:
clf: Clusterer instance that implements ``fit``,``fit_predict``, and
``score`` methods, and an ``n_clusters`` hyperparameter.
e.g. :class:`sklearn.cluster.KMeans` instance
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
n_clusters (int): Number of clusters
Returns:
score: Score of clusters
time: Number of seconds it took to fit cluster
"""
start = time.time()
clf = clone(clf)
setattr(clf, 'n_clusters', n_clusters)
return clf.fit(X).score(X), time.time() - start
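# Minimal sketch (assumes scikit-learn's KMeans; nothing in this module calls
# it): the same clone-and-score helper can be used directly to inspect the
# elbow values and timings without drawing a plot.
def _elbow_scores_sketch(X, cluster_ranges=range(1, 6)):
    from sklearn.cluster import KMeans
    return [_clone_and_score_clusterer(KMeans(random_state=0), X, k)
            for k in cluster_ranges]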
| mit |
fabioticconi/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
akrherz/idep | scripts/tillage_timing/dump_mar10_tab4.py | 2 | 1963 | """Dump for tab4 of the requested 10 March 2020 spreadsheet.
"""
import glob
from pyiem.dep import read_wb, read_env
from pyiem.util import logger
import pandas as pd
LOG = logger()
def do_scenario(scenario, hucs):
"""Process this scenario."""
smdfs = []
for huc12 in hucs:
for fn in glob.glob(
"/i/%s/wb/%s/%s/*" % (scenario, huc12[:8], huc12[8:])
):
smdfs.append(read_wb(fn))
smdf = pd.concat(smdfs).reset_index()
smdf = smdf[smdf["ofe"] == 1]
smdf = smdf[
(smdf["date"] >= pd.to_datetime("2018/1/1"))
& (smdf["date"] < pd.to_datetime("2019/1/1"))
]
smdf["jday"] = pd.to_numeric(
smdf["date"].dt.strftime("%j"), downcast="integer"
)
smres = smdf.groupby("jday").mean()
envdfs = []
for huc12 in hucs:
for fn in glob.glob(
"/i/%s/env/%s/%s/*" % (scenario, huc12[:8], huc12[8:])
):
df = read_env(fn)
if df.empty:
continue
envdfs.append(df)
envdf = pd.concat(envdfs).reset_index()
envdf = envdf[
(envdf["date"] >= pd.to_datetime("2018/1/1"))
& (envdf["date"] < pd.to_datetime("2019/1/1"))
]
envdf["jday"] = pd.to_numeric(
envdf["date"].dt.strftime("%j"), downcast="integer"
)
# Manually compute to account for zeros
res = (envdf.groupby("jday").sum() / float(len(envdfs))).copy()
smres["precip"] = res["precip"]
smres["av_det"] = res["av_det"]
smres = smres.fillna(0)
cols = ["precip", "av_det", "sw1"] if scenario == 81 else ["av_det", "sw1"]
LOG.info("dumping scenario: %s", scenario)
smres[cols].to_csv(
"tab4_%s.csv" % (scenario,), float_format="%.4f", index=False
)
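# Toy illustration of the "manually compute to account for zeros" step above
# (numbers are made up): dividing the per-day sum by the number of files treats
# files with no event on that day as zeros, whereas groupby(...).mean() would
# only average over the files that reported a value.
def _zero_aware_average_example():
    df = pd.DataFrame({"jday": [1, 1, 2], "av_det": [2.0, 4.0, 6.0]})
    n_files = 3
    biased = df.groupby("jday").mean()  # day 2 -> 6.0
    zero_aware = df.groupby("jday").sum() / n_files  # day 2 -> 2.0
    return biased, zero_aware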
def main():
"""Go Main Go."""
hucs = [x.strip() for x in open("myhucs.txt").readlines()]
for scenario in range(81, 91):
do_scenario(scenario, hucs)
if __name__ == "__main__":
main()
| mit |
befelix/GPy | GPy/plotting/__init__.py | 4 | 6717 | # Copyright (c) 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
current_lib = [None]
supported_libraries = ['matplotlib', 'plotly', 'plotly_online', 'plotly_offline', 'none']
error_suggestion = "Please make sure you specify your plotting library in your configuration file (<User>/.config/GPy/user.cfg).\n\n[plotting]\nlibrary = <library>\n\nCurrently supported libraries: {}".format(", ".join(supported_libraries))
def change_plotting_library(lib, **kwargs):
try:
#===========================================================================
# Load in your plotting library here and
# save it under the name plotting_library!
# This is hooking the library in
# for the usage in GPy:
if lib not in supported_libraries:
raise ValueError("Warning: Plotting library {} not recognized, currently supported libraries are: \n {}".format(lib, ", ".join(supported_libraries)))
if lib == 'matplotlib':
import matplotlib
from .matplot_dep.plot_definitions import MatplotlibPlots
from .matplot_dep import visualize, mapping_plots, priors_plots, ssgplvm, svig_plots, variational_plots, img_plots
current_lib[0] = MatplotlibPlots()
if lib in ['plotly', 'plotly_online']:
import plotly
from .plotly_dep.plot_definitions import PlotlyPlotsOnline
current_lib[0] = PlotlyPlotsOnline(**kwargs)
if lib == 'plotly_offline':
import plotly
from .plotly_dep.plot_definitions import PlotlyPlotsOffline
current_lib[0] = PlotlyPlotsOffline(**kwargs)
if lib == 'none':
current_lib[0] = None
inject_plotting()
#===========================================================================
except (ImportError, NameError):
config.set('plotting', 'library', 'none')
raise
import warnings
warnings.warn(ImportWarning("You spevified {} in your configuration, but is not available. Install newest version of {} for plotting".format(lib, lib)))
def inject_plotting():
if current_lib[0] is not None:
# Inject the plots into classes here:
# Already converted to new style:
from . import gpy_plot
from ..core import GP
GP.plot_data = gpy_plot.data_plots.plot_data
GP.plot_data_error = gpy_plot.data_plots.plot_data_error
GP.plot_errorbars_trainset = gpy_plot.data_plots.plot_errorbars_trainset
GP.plot_mean = gpy_plot.gp_plots.plot_mean
GP.plot_confidence = gpy_plot.gp_plots.plot_confidence
GP.plot_density = gpy_plot.gp_plots.plot_density
GP.plot_samples = gpy_plot.gp_plots.plot_samples
GP.plot = gpy_plot.gp_plots.plot
GP.plot_f = gpy_plot.gp_plots.plot_f
GP.plot_latent = gpy_plot.gp_plots.plot_f
GP.plot_noiseless = gpy_plot.gp_plots.plot_f
GP.plot_magnification = gpy_plot.latent_plots.plot_magnification
from ..models import StateSpace
StateSpace.plot_data = gpy_plot.data_plots.plot_data
StateSpace.plot_data_error = gpy_plot.data_plots.plot_data_error
StateSpace.plot_errorbars_trainset = gpy_plot.data_plots.plot_errorbars_trainset
StateSpace.plot_mean = gpy_plot.gp_plots.plot_mean
StateSpace.plot_confidence = gpy_plot.gp_plots.plot_confidence
StateSpace.plot_density = gpy_plot.gp_plots.plot_density
StateSpace.plot_samples = gpy_plot.gp_plots.plot_samples
StateSpace.plot = gpy_plot.gp_plots.plot
StateSpace.plot_f = gpy_plot.gp_plots.plot_f
StateSpace.plot_latent = gpy_plot.gp_plots.plot_f
StateSpace.plot_noiseless = gpy_plot.gp_plots.plot_f
from ..core import SparseGP
SparseGP.plot_inducing = gpy_plot.data_plots.plot_inducing
from ..models import GPLVM, BayesianGPLVM, bayesian_gplvm_minibatch, SSGPLVM, SSMRD
GPLVM.plot_latent = gpy_plot.latent_plots.plot_latent
GPLVM.plot_scatter = gpy_plot.latent_plots.plot_latent_scatter
GPLVM.plot_inducing = gpy_plot.latent_plots.plot_latent_inducing
GPLVM.plot_steepest_gradient_map = gpy_plot.latent_plots.plot_steepest_gradient_map
BayesianGPLVM.plot_latent = gpy_plot.latent_plots.plot_latent
BayesianGPLVM.plot_scatter = gpy_plot.latent_plots.plot_latent_scatter
BayesianGPLVM.plot_inducing = gpy_plot.latent_plots.plot_latent_inducing
BayesianGPLVM.plot_steepest_gradient_map = gpy_plot.latent_plots.plot_steepest_gradient_map
bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch.plot_latent = gpy_plot.latent_plots.plot_latent
bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch.plot_scatter = gpy_plot.latent_plots.plot_latent_scatter
bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch.plot_inducing = gpy_plot.latent_plots.plot_latent_inducing
bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch.plot_steepest_gradient_map = gpy_plot.latent_plots.plot_steepest_gradient_map
SSGPLVM.plot_latent = gpy_plot.latent_plots.plot_latent
SSGPLVM.plot_scatter = gpy_plot.latent_plots.plot_latent_scatter
SSGPLVM.plot_inducing = gpy_plot.latent_plots.plot_latent_inducing
SSGPLVM.plot_steepest_gradient_map = gpy_plot.latent_plots.plot_steepest_gradient_map
from ..kern import Kern
Kern.plot_covariance = gpy_plot.kernel_plots.plot_covariance
def deprecate_plot(self, *args, **kwargs):
import warnings
warnings.warn(DeprecationWarning('Kern.plot is being deprecated and will not be available in the 1.0 release. Use Kern.plot_covariance instead'))
return self.plot_covariance(*args, **kwargs)
Kern.plot = deprecate_plot
Kern.plot_ARD = gpy_plot.kernel_plots.plot_ARD
from ..inference.optimization import Optimizer
Optimizer.plot = gpy_plot.inference_plots.plot_optimizer
# Variational plot!
def plotting_library():
if current_lib[0] is None:
raise RuntimeError("No plotting library was loaded. \n{}".format(error_suggestion))
return current_lib[0]
def show(figure, **kwargs):
"""
Show the specific plotting library figure, returned by
add_to_canvas().
kwargs are the plotting library specific options
for showing/drawing a figure.
"""
return plotting_library().show_canvas(figure, **kwargs)
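# A rough usage sketch (illustrative only, not executed by this module). It
# assumes matplotlib is installed and that `m` is a GPy model fitted elsewhere;
# the plot methods on `m` are the ones injected by inject_plotting() above:
#
#   import GPy
#   GPy.plotting.change_plotting_library('matplotlib')  # or 'plotly', 'plotly_offline', 'none'
#   fig = m.plot()
#   GPy.plotting.show(fig)  # dispatches to the active library's show_canvas()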
from ..util.config import config, NoOptionError
try:
lib = config.get('plotting', 'library')
change_plotting_library(lib)
except NoOptionError:
print("No plotting library was specified in config file. \n{}".format(error_suggestion))
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 78 | 17611 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert_equal(X_means.dtype, output_dtype)
            assert_equal(X_vars.dtype, output_dtype)
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
data_chunks = [rng.randint(0, 2, size=n_features)
for i in range(n_samples)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = 0
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
last_mean, last_var, last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
# Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_sparse, axis, last_mean,
last_var, last_n)
assert_equal(X_means_incr.dtype, output_dtype)
assert_equal(X_vars_incr.dtype, output_dtype)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
for dtype in (np.float32, np.float64):
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=dtype)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=dtype)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
def test_inplace_normalize():
ones = np.ones((10, 1))
rs = RandomState(10)
for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2):
for dtype in (np.float64, np.float32):
X = rs.randn(10, 5).astype(dtype)
X_csr = sp.csr_matrix(X)
inplace_csr_row_normalize(X_csr)
assert_equal(X_csr.dtype, dtype)
if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
X_csr.data **= 2
assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
| bsd-3-clause |
dwillmer/fastats | tests/scaling/test_scaling.py | 2 | 5406 |
import numpy as np
import pandas as pd
import sys
from numba import njit
from numpy.testing import assert_allclose
from pytest import mark, raises, approx
from scipy.stats import rankdata
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from fastats.scaling.scaling import (standard, min_max, rank, scale, demean, standard_parallel, min_max_parallel,
demean_parallel, shrink_off_diagonals)
from tests.data.datasets import SKLearnDataSets
def test_scale_no_op():
data = np.arange(100, dtype=float)
data[13] = np.nan
output = scale(data)
assert np.allclose(data, output, equal_nan=True)
@mark.parametrize('A', SKLearnDataSets)
def test_standard_scale_versus_sklearn(A):
data = A.value
expected = StandardScaler().fit_transform(data)
output = standard(data)
assert np.allclose(expected, output)
@mark.parametrize('A', SKLearnDataSets)
def test_standard_scale_with_bessel_correction_versus_sklearn(A):
data = A.value
df = pd.DataFrame(data)
def zscore(data):
return (data - data.mean()) / data.std(ddof=1)
expected = df.apply(zscore).values
output = standard(data, ddof=1)
assert np.allclose(expected, output)
def test_standard_scale_raises_if_ddof_ne_0_or_1():
data = np.arange(20, dtype=float).reshape(2, 10)
for ddof in -1, 2:
with raises(ValueError):
_ = standard(data, ddof=ddof)
@mark.parametrize('A', SKLearnDataSets)
def test_min_max_scale_versus_sklearn(A):
data = A.value
expected = MinMaxScaler().fit_transform(data)
output = min_max(data)
assert np.allclose(expected, output)
@mark.parametrize('A', SKLearnDataSets)
def test_rank_scale_versus_scipy(A):
data = A.value
# rank the data all at once
output = rank(data)
# check each column versus scipy equivalent
for i in range(data.shape[1]):
feature = data[:, i]
expected = rankdata(feature)
assert np.allclose(expected, output[:, i])
@mark.parametrize('A', SKLearnDataSets)
def test_demean(A):
data = A.value
expected = data - data.mean(axis=0)
output = demean(data)
assert np.allclose(expected, output)
@mark.parametrize('factor', np.linspace(-1, 1, 9), ids='factor_{0:.2f}'.format)
def test_shrink_off_diagonals(factor):
A = np.empty(shape=(10, 10))
m, n = A.shape
for i in range(m):
for j in range(n):
A[i, j] = 1.0 - abs(i - j) / m
output = shrink_off_diagonals(A, factor)
# diagonals should be unaffected
assert_allclose(np.diag(output), np.diag(A))
# all other values should have been shrunk
for i in range(m):
for j in range(n):
if i != j:
assert output[i, j] == approx(A[i, j] * factor)
def test_shrink_off_diagonals_factor_zero():
A = np.arange(100, dtype=np.float64).reshape(10, 10)
# special case where factor is 0 - we expect an output
# where all off diagonal values are zeroed out
output = shrink_off_diagonals(A, 0)
assert_allclose(output, np.diag(np.diag(A)))
# ----------------------------------------------------------------
# explicitly parallel algorithm tests
#
# Note: parallel not supported on 32bit platforms
# ----------------------------------------------------------------
parallel = not (sys.platform == 'win32')
demean_parallel_jit = njit(demean_parallel, parallel=parallel)
min_max_parallel_jit = njit(min_max_parallel, parallel=parallel)
standard_parallel_jit = njit(standard_parallel, parallel=parallel)
@mark.parametrize('A', SKLearnDataSets)
def test_demean_parallel(A):
data = A.value
expected = data - data.mean(axis=0)
for fn in demean_parallel, demean_parallel_jit:
output = fn(data)
assert np.allclose(expected, output)
@mark.parametrize('A', SKLearnDataSets)
def test_min_max_scale_parallel_versus_sklearn(A):
data = A.value
expected = MinMaxScaler().fit_transform(data)
for fn in min_max_parallel, min_max_parallel_jit:
output = fn(data)
assert np.allclose(expected, output)
@mark.parametrize('A', SKLearnDataSets)
def test_standard_scale_parallel_versus_sklearn(A):
data = A.value
expected = StandardScaler().fit_transform(data)
for fn in standard_parallel, standard_parallel_jit:
output = fn(data)
assert np.allclose(expected, output)
@mark.parametrize('A', SKLearnDataSets)
def test_standard_scale_parallel_with_bessel_correction_versus_sklearn(A):
data = A.value
df = pd.DataFrame(data)
def zscore(data):
return (data - data.mean()) / data.std(ddof=1)
expected = df.apply(zscore).values
# Issues seen here running standard_parallel_jit using
# numba 0.35 on OS X.
# The standard parallel variant works fine, but the
# jit version is returning garbage float values for
# some (not all) data sets.
# Looks very much like a threading issue.
for fn in (standard_parallel, standard_parallel_jit):
output = fn(data, ddof=1)
assert np.allclose(expected, output)
def test_standard_scale_parallel_raises_if_ddof_ne_0_or_1():
data = np.arange(20, dtype=float).reshape(2, 10)
for fn in standard_parallel, standard_parallel_jit:
with raises(AssertionError):
_ = fn(data, ddof=-1)
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| mit |
MetrodataTeam/incubator-airflow | airflow/hooks/base_hook.py | 23 | 2895 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def _get_connections_from_db(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
session.close()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def _get_connection_from_env(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
return conn
@classmethod
def get_connections(cls, conn_id):
conn = cls._get_connection_from_env(conn_id)
if conn:
conns = [conn]
else:
conns = cls._get_connections_from_db(conn_id)
return conns
@classmethod
def get_connection(cls, conn_id):
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
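# Illustrative sketch (not part of Airflow): concrete hooks subclass BaseHook and
# resolve credentials through get_connection(); the connection id 'my_postgres'
# below is hypothetical.
#
#   conn = BaseHook.get_connection('my_postgres')
#   print(conn.host, conn.port, conn.login)
#
# A connection can also be supplied via the environment, which is picked up by
# _get_connection_from_env() above, e.g.:
#   export AIRFLOW_CONN_MY_POSTGRES='postgres://user:pass@localhost:5432/mydb'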
| apache-2.0 |
pathfinder14/OpenSAPM | utils/bicompact_method/test_bcomp.py | 1 | 6707 | import matplotlib.pyplot as plt
import math as math
from bicompact import bicompact_method
import numpy as np
'''
Test for the transport equation (TVD) solver function
'''
aTestList = ['const','tear', 'hat']
uTestList = ['step', 'gauss', 'peak']
for aIndex in range(len(aTestList)):
for uIndex in range(len(uTestList)):
        aTest = aTestList[aIndex] # parameter a
        uTest = uTestList[uIndex] # initial condition
a0 = -0.5
        left = 1.0 # limits along the x axis
right = 5.0
dx = 0.01
cfl = 0.09
dt = cfl*dx/abs(a0)
        t_end = 0.6 # Time (t_end / dt sets the number of iterations)
n = int((right - left) / dx)
a = np.zeros(n)
        # Define the parameter a
        if aTest == 'const': # constant parameter
for i in range(int(n/2)):
a[i] = 0.5
for i in range(int(n/2), n):
a[i] = 0.5
        elif aTest == 'tear': # discontinuity
for i in range(int(n/2)):
a[i] = 0.5
for i in range(int(n/2), n):
a[i] = 1
        elif aTest == 'hat': # "hat" profile
mu = (right + left) / 2.0
for i in range(n):
a[i] = 0.5 + 0.5/(1 + 100*(left + i*dx - mu)*(left + i*dx - mu))
        # Define the test function (initial condition)
u_0 = np.zeros(n)
u0 = np.zeros(n)
        if uTest == 'step': # step
for i in range(int(n/50)):
u_0[i] = 1
for i in range(int(n/50), int(n/3)):
u_0[i] = 2
for i in range(int(n/3), n):
u_0[i] = 1
        elif uTest == 'gauss': # Gaussian function
sigma = 0.02
pi = 3.14
# mu = 1.5
mu = left + (right - left) / 2.5
for i in range(n):
u_0[i] = 1.0 + 0.05 * 1.0 / (sigma * math.sqrt(2*pi)) * math.exp(-((left + i*dx - mu)*(left + i*dx - mu)) / (2.0 * sigma * sigma))
        elif uTest == 'peak': # peak
sigma = 0.1
pi = 3.14
# mu = 1.5
mu = left + (right - left) / 2.5
delta = (mu - left) / 2.0
length = right - left
            for i in range(int(delta/length * n), int(3 * delta/length * n)): # peak on a compact (finite) support
if (left + i*dx - mu) == 0.0:
u_0[i] = 1.0
else:
u_0[i] = math.sin(100*(left + i*dx - mu)) / (100*(left + i*dx - mu))
for i in range(n):
u0[i] = u_0[i]
u = np.zeros(n, dtype=np.float)
x = np.zeros(n, dtype=np.float)
        # Compute u
ut1 = np.zeros(n, dtype=np.float)
ut2 = np.zeros(n, dtype=np.float)
ut3 = np.zeros(n, dtype=np.float)
        # run the method over the parameter vector
for k in range(int(t_end / dt)):
for j in range (u0.size-5):
tmpx1 = np.copy(u[j:j+3])
tmp = bicompact_method(a[j+1],dt,dx,u0[j:j+3],tmpx1)
u[j+1] = tmp[0]
for i in range(n-2):
u0[i+1] = u[i]
            # Boundary conditions
u0[0] = u0[1]
u0[n-1] = u0[n-2]
if k == int(t_end / dt * 0.25):
for i in range(n):
ut1[i] = u0[i]
elif k == int(t_end / dt * 0.5):
for i in range(n):
ut2[i] = u0[i]
elif k == int(t_end / dt * 0.75):
for i in range(n):
ut3[i] = u0[i]
        # Initialize the x vector for plotting
j = 0
i= left
for j in range(n-2):
x[j] = i
j = j + 1
i = i + dx
        x1 = np.zeros(n, dtype=np.float) # length n, for plotting the input data
        x2 = np.ones(n, dtype=np.float) # for plotting the analytical solution
j = 0
i= left
for j in range(n):
x1[j] = i
j = j + 1
i = i + dx
j = 0
i = left + a[0] * t_end
while (j < n) and (i < right):
x2[j] = i
j = j + 1
i = i + dx
        # Plot the results
plt.subplot(311)
plt.plot(x, u, label = 'x_1 (t = ' + str(t_end) + ')')
if (aTest == 'const'):
plt.plot(x2, u_0, label = 'x (t = ' + str(t_end) + ')')
plt.axis([1.0,5.4,1.0,2.2])
if uTest == 'peak':
plt.axis([1.0,5.4,-0.4,1.2])
else:
plt.plot(x1, u_0, label = 'x_0 (t = 0)')
if uTest != 'peak':
plt.axis([1.0,5.4,1,2.2])
plt.legend(bbox_to_anchor=(0.35, 1), loc=1, borderaxespad=0.25)
plt.ylabel('x')
plt.xlabel('h')
plt.subplot(312)
plt.plot(x1, u_0)
plt.plot(x1, ut1, label = 'x (t = ' + str(t_end * 0.25) + ')')
plt.plot(x1, ut2, label = 'x(t = ' + str(t_end * 0.5) + ')')
plt.plot(x1, ut3, label = 'x (t = ' + str(t_end * 0.75) + ')')
plt.plot(x, u,label = 'x (t = ' + str(t_end) + ')')
if uTest != 'peak':
plt.axis([1.0,5.4,1,2.2])
plt.legend(bbox_to_anchor=(0.33, 1), loc=1, borderaxespad=0.25)
plt.subplot(313)
plt.plot(x1, a, marker = '.', label = 'a')
plt.ylabel('a')
plt.xlabel('h')
plt.legend(bbox_to_anchor=(0.16, 1), loc=1, borderaxespad=0.25)
plt.savefig('plots/' + 'a_' + aTest + '_x0_' + uTest + '.png')
#plt.show()
plt.close()
| mit |
octopicorn/bcikit | Analysis/modules/SignalGenerator.py | 1 | 15488 | __author__ = 'odrulea'
from Analysis.modules.ModuleAbstract import ModuleAbstract
import time
import random
import numpy as np
from lib.constants import *
import pandas
"""
This module generates a signal and publishes to the message queue.
This can include the following types of data:
- random numbers (eeg)
- sine waves
- class labels
"""
class SignalGenerator(ModuleAbstract):
MODULE_NAME = "Signal Generator"
# __init__ is handled by parent ModuleAbstract
def setup(self):
ModuleAbstract.setup(self)
# time counter, counts number of ticks in current period
self.period_counter = 0;
self.lines_counter = 0;
self.total_counter = 0;
# params
# sampling_rate (Hz)
self.sampling_rate = float(self.module_settings["sampling_rate"]) if "sampling_rate" in self.module_settings else 100.
# frequency (Hz)
self.frequency = float(self.module_settings["sine_frequency"]) if "sine_frequency" in self.module_settings else 10.
# range
self.range = self.module_settings["range"] if "range" in self.module_settings else [0,1]
# separator
self.separator = self.module_settings["separator"] if "separator" in self.module_settings else '\t'
# skip_lines_prefix
self.skip_lines_prefix = self.module_settings["skip_lines_prefix"] if "skip_lines_prefix" in self.module_settings else None
# skip_lines_num
self.skip_lines = self.module_settings["skip_lines"] if "skip_lines" in self.module_settings else None
# skip_columns
self.skip_columns = self.module_settings["skip_columns"] if "skip_columns" in self.module_settings else None
# include_columns
self.include_columns = self.module_settings["include_columns"] if "include_columns" in self.module_settings else None
# data contains timestamp flag
self.data_already_contains_timestamp = self.module_settings["data_already_contains_timestamp"] if "data_already_contains_timestamp" in self.module_settings else False
# timestamp_column
self.timestamp_column = self.module_settings["timestamp_column"] if "timestamp_column" in self.module_settings else 0
# class_label_column
self.class_label_column = self.module_settings["class_label_column"] if "class_label_column" in self.module_settings else None
        # flag: whether to play back at the sampling_rate. if false, generate as fast as possible
self.generate_at_sampling_rate = self.module_settings["generate_at_sampling_rate"] if "generate_at_sampling_rate" in self.module_settings else True
# pattern
self.pattern = "rand"
if self.module_settings["pattern"]:
self.pattern = self.module_settings["pattern"]
# what pattern to generate
if self.pattern == "sine":
# SINE WAVE PATTERN
# formula for sine wave is:
# sine_wave = amp.*sin(2*pi*freq.*time);
# amplitude = half the distance between the min and max
amp = abs(float(self.range[1]) - float(self.range[0])) / 2
# zero will be halfway between the min and max
offset = float(self.range[1]) - amp
# np.linspace(-np.pi, np.pi, sampling_rate) --> make a range of x values, as many as sampling rate
# this is equivalent to 2*pi*time
sine_x = np.linspace(-np.pi, np.pi, self.sampling_rate)
self.sine_waves = []
# sine wave 1
sine1 = [(amp * np.sin(t * self.sampling_rate/self.frequency)) + offset for t in sine_x]
self.sine_waves.append(np.tile(sine1,self.frequency))
# sine wave 2 (double amp, triple freq)
sine2 = [((2*amp) * np.sin(3 * t * self.sampling_rate/self.frequency)) + offset for t in sine_x]
self.sine_waves.append(np.tile(sine2,self.frequency))
# default to the first sine wave (only used if sine)
self.sine_wave_to_use = 0
self.generate_pattern_func = "generateSine"
elif self.pattern == "files":
# get file list from settings
self.file_list = self.module_settings["files"]
# force file list to be list if not already
if type(self.file_list) != list:
self.file_list = [self.file_list]
self.current_file_index = -1
self.current_file = None
self.generate_pattern_func = "generateFromFiles"
elif self.pattern == "class_labels":
self.first_class_sent = False
self.inactive_class = 0
self.class_length_sec = self.module_settings["class_sec"] if "class_sec" in self.module_settings else 3
self.inactive_length_sec = self.module_settings["between_class_sec"] if "between_class_sec" in self.module_settings else 6
self.class_samples_per_trial = self.class_length_sec * self.sampling_rate
self.inactive_samples_per_trial = self.inactive_length_sec * self.sampling_rate
self.current_class = self.inactive_class
self.initial_countdown = self.module_settings["start_delay_sec"] if "start_delay_sec" in self.module_settings else 10
self.countdown = (self.initial_countdown+1) * self.sampling_rate
self.generate_pattern_func = "generateClassLabel"
else:
# RANDOM PATTERN
self.generate_pattern_func = "generateRandom"
if self.class_label_column:
self.lastClassLabel = None
#if self.debug:
# print "SAMPLING_RATE: " + str(self.sampling_rate) + " Hz"
# print "RANGE: " + str(self.range)
def getNextFile(self):
if self.debug:
print "************* GET NEXT FILE *******************"
# open the next available file
self.current_file_index += 1
# if we have advanced to the next index, and it's bigger than len of file array
if self.current_file_index >= len(self.file_list):
# start with the first file again (infinite loop)
self.current_file_index = 0
# open the current file
# print "opening file " + str(self.current_file_index)
fname = self.file_list[self.current_file_index]
self.current_file = open(fname)
def generateSine(self,x):
message = {"channel_%s" % i: round(self.sine_waves[self.sine_wave_to_use][x],3) for i in xrange(self.num_channels)}
return message
def generateFromFiles(self, x):
message = None
timestamp = None
classLabel = None
# if no file open, open the next one
if self.current_file is None:
self.getNextFile()
# get the next line in file
nextline = self.current_file.readline()
if len(nextline) == 0:
print "------------------------------------------------------------------------------------------------------"
print "------------------------------------------------------------------------------------------------------"
print "------------------------------------------------------------------------------------------------------"
print "------------------------------------------------------------------------------------------------------"
print "------------------------------------------------------------------------------------------------------"
print "REACHED END OF FILE"
self.getNextFile()
nextline = self.current_file.readline()
self.lines_counter = 0
# increment line number
self.lines_counter += 1
# Skip line conditions: skip by line number
# if we are skipping current, just return none
if self.skip_lines and self.lines_counter <= self.skip_lines:
return [message, classLabel, timestamp]
# Skip line conditions: skip by line prefix
if self.skip_lines_prefix and nextline.startswith(self.skip_lines_prefix):
print "prefix"
return [message, classLabel, timestamp]
# split new line into data by separator
nextline = np.array(nextline.strip().split(self.separator), dtype=float)
# TODO deal with edge case of timestamp column coming after class column, may have to use some other strategy besides pop()
# for now, it's ok to just pop the classLabel first, then timestamp
# pop the class label off the line if it's present
if self.class_label_column is not None:
classLabel = nextline[self.class_label_column]
nextline = np.delete(nextline, self.class_label_column)
# add the timestamp to data if it's not already present
if self.data_already_contains_timestamp is True:
# in this case we already have the timestamp, in the 0th position
timestamp = nextline[self.timestamp_column]
nextline = np.delete(nextline, self.timestamp_column)
message = {"channel_%s" % i: float(nextline[i]) for i in xrange(len(nextline))}
message['timestamp'] = timestamp
return [message, classLabel, timestamp]
# column filters
if self.skip_columns and self.skip_columns:
# filter out specific columns
nextline = np.delete(nextline, self.skip_columns)
elif self.include_columns:
# only allow certain columns
nextline = nextline[self.include_columns]
# just loop through all the elements in the line, assuming each element = 1 channel sample
message = {"channel_%s" % i: int(float(nextline[i])) for i in xrange(len(nextline))}
return [message, classLabel, timestamp]
def generateRandom(self,x):
if self.outputs['data']['data_type'] == DATA_TYPE_RAW_DATA:
message = {"channel_%s" % i: random.randint(self.range[0],self.range[1]) * random.random() for i in xrange(self.num_channels)}
elif self.outputs['data']['data_type'] == DATA_TYPE_CLASS_LABELS:
message = {"class":random.choice(self.range)}
return message
def generateClassLabel(self,x):
if self.outputs['data']['data_type'] == DATA_TYPE_CLASS_LABELS:
#we're counting down in the inactive class
if self.countdown > 1:
self.countdown = self.countdown-1
#if not self.first_class_sent:
message = {"class":self.current_class}
#self.first_class_sent = True
#print message
return message
else:
# we are done with the current countdown
if self.current_class == self.inactive_class:
# switch from inactive to pick one class
self.current_class = random.choice(self.range)
self.countdown = self.class_samples_per_trial
if not self.first_class_sent:
self.first_class_sent =True
else:
# switch from class to inactive state
self.current_class = self.inactive_class
self.countdown = self.inactive_samples_per_trial
message = {"class":self.current_class}
#print message
return message
def generate(self):
# set calibration to True only when you are trying to calibrate the best possible sampling rate
# accuracy for your system (see comments below)
calibration = False
sleep_padding = 1.
# how many fractions of 1 whole second? that is how long we will sleep
max_time_per_loop = (1. / float(self.sampling_rate))
"""
sleep time needs to be reduced to account for processing time
the factor by which you multiply the sleep time is called "sleep_padding"
for example:
a sleep_padding of .61 means that if your max_time_per_loop = 0.002, you
will sleep for 60% of that time, or 0.00126
the remainder of the time, the "fine tuned" wait time, will be spent busy-waiting, since that
is more accurate than sleeping
we have to account for this sleep_padding factor because we have some processing to do in each loop
and this takes time, so if we sleep for a full duration of max_time_per_loop, there's no time
left for the processing
this is a fine calibration that needs to be run on each individual computer
with debug set to true, run only this module by itself
ideally, with the best sleep_padding, you should see as close to 1 sec per sampling_rate # of samples
like this:
1.00076007843 sec to get 500.0 samples (1173 busy wait ticks)
1.00126099586 sec to get 500.0 samples (770 busy wait ticks)
1.00085878372 sec to get 500.0 samples (1769 busy wait ticks)
this is a fairly good accuracy, although for lower Hz it can be even more accurate to within 1.000
"""
# different padding works better for different Hz
if self.sampling_rate >= 500.:
sleep_padding = .07
elif self.sampling_rate >= 375:
sleep_padding = .30
elif self.sampling_rate >= 250:
sleep_padding = .30
elif self.sampling_rate >= 100:
sleep_padding = .5
elif self.sampling_rate >= 50:
sleep_padding = .79
if calibration:
print "********* sampling rate: " + str(self.sampling_rate)
# sleep will take most but not all of the time per loop
sleep_length = float(max_time_per_loop * sleep_padding)
if calibration:
print "********* max time per loop: " + str(max_time_per_loop)
print "********* sleep length: " + str(sleep_length)
# start timer
self.period_counter = 0
busy_wait_ticks = 0
time_start_period = time_start_loop = time.time()
while(True):
if self.generate_at_sampling_rate:
# sleep first
time.sleep(sleep_length)
# this gets us most of the way there
#
#
#
# now do the processing work
# generate message by whatever pattern has been specified
#classLabel = None
classLabel = None;
if self.pattern == "files":
message,classLabel,timestamp = getattr(self,self.generate_pattern_func)(self.period_counter)
else:
message = getattr(self,self.generate_pattern_func)(self.period_counter)
# increment period counter
self.period_counter += 1
self.total_counter += 1
# deliver 'data' output
if message:
# generate a timestamp if not already present
if self.data_already_contains_timestamp is False:
timestamp = int(time.time() * 1000000)
message['timestamp'] = timestamp
# PUBLISH message
self.write('data', message)
if self.debug:
print message
# deliver 'labels' output
if classLabel is not None:
# if class label has changed, generate a new class label message
# and then update lastClassLabel
if classLabel != self.lastClassLabel:
class_label_message = {"timestamp":timestamp,"class":classLabel}
self.write('labels', class_label_message)
self.lastClassLabel = classLabel
if self.debug:
print "******************************************************************************"
print "******************************************************************************"
print "******************************************************************************"
print "******************************************************************************"
print "******************************************************************************"
print "******************************************************************************"
print "******************************************************************************"
print class_label_message
if self.debug:
print "CLASS: " + classLabel + " TIME: " + timestamp
if self.generate_at_sampling_rate:
# now busy wait until we have reached the end of the wait period
time_elapsed_loop = time.time() - time_start_loop
while time_elapsed_loop < max_time_per_loop :
# busy wait
# print time_elapsed_loop
time_elapsed_loop = time.time() - time_start_loop
busy_wait_ticks = busy_wait_ticks + 1
# when busy-wait while loop is done, we've reached the end of one loop
# see how long it took to get our samples per second
if(self.period_counter == self.sampling_rate):
time_elapsed_total = time.time() - time_start_period
# debug message
if calibration:
print str(time_elapsed_total) + " sec to get " + str(self.sampling_rate) + " samples (" + str(busy_wait_ticks) + " busy wait ticks)"
# reset period counter at end of each period
self.period_counter = 0
# alternate sine wave pattern if sine
if self.pattern == "sine":
self.sine_wave_to_use = 1 if self.sine_wave_to_use == 0 else 0
# reset period timer
time_start_period = time.time()
# at end of every loop, reset per-loop timer and ticks counter
time_start_loop = time.time()
busy_wait_ticks = 0
| agpl-3.0 |
Ized06/GID_Internal | client/examples/ipynb_util.py | 3 | 1742 | import os, sys, time, re
import numpy as np
import matplotlib.pyplot as plt
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print '[%s]' % self.name,
print 'Elapsed: %s' % (time.time() - self.tstart)
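# Example usage of the Timer context manager above (illustrative):
#
#   with Timer('render frame'):
#       time.sleep(0.1)
#
# which prints something like: [render frame] Elapsed: 0.100123...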
def plot_object_color(object_list, color_mapping):
N = len(object_list)
object_id = 1
for object_name in object_list:
color = color_mapping[object_name]
plt.subplot(1,N,object_id)
plot_color(color, object_name)
object_id += 1
def generate_objectcatetory_json(scene_objects):
# Use http://www.jsoneditoronline.org/ to clean the json
# http://jsonformat.com/#jsondataurllabel
''' Get object category from object name, with some manual editing '''
print '{'
for obj in scene_objects:
objtype = obj.replace('SM_', '').split('_')[0].replace('BookLP', 'Book').replace('Wire1', 'Wire')
print ' ', repr(obj), ':', repr(objtype), ','
print '}'
def check_coverage(dic_instance_mask):
''' Check the portion of labeled image '''
marked_region = None
for object_name in dic_instance_mask.keys():
instance_mask = dic_instance_mask[object_name]
if marked_region is None:
marked_region = np.zeros(instance_mask.shape[0:2])
marked_region += instance_mask
assert(marked_region.max() == 1)
if marked_region.max() > 1:
print 'There are invalid regions in the labeling'
coverage = float(marked_region.sum()) / (marked_region.shape[0] * marked_region.shape[1])
print 'Coverage %.2f' % coverage
return marked_region
| mit |
hrjn/scikit-learn | sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
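# A minimal usage sketch (illustrative, not part of the library source). It uses
# sklearn.datasets.make_biclusters, which plants the block-diagonal structure
# this estimator is designed to recover:
#
#   from sklearn.datasets import make_biclusters
#   data, rows, columns = make_biclusters(shape=(300, 300), n_clusters=5,
#                                         noise=5, random_state=0)
#   model = SpectralCoclustering(n_clusters=5, random_state=0)
#   model.fit(data)
#   print(model.row_labels_[:10], model.column_labels_[:10])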
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
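# Minimal usage sketch of the estimator defined above (illustrative; assumes
# scikit-learn's make_checkerboard generator is importable).  Running the
# module directly fits the model on synthetic checkerboard data.
if __name__ == '__main__':
    from sklearn.datasets import make_checkerboard

    demo_data, _, _ = make_checkerboard(shape=(300, 300), n_clusters=(4, 3),
                                        noise=10, shuffle=True, random_state=0)
    demo_model = SpectralBiclustering(n_clusters=(4, 3), method='log',
                                      random_state=0)
    demo_model.fit(demo_data)
    print('row_labels_ shape: %s' % (demo_model.row_labels_.shape,))
    print('distinct row clusters: %d' % len(np.unique(demo_model.row_labels_)))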
| bsd-3-clause |
bvillasen/phyGPU | hydrodynamics/hydro_3D.py | 1 | 14983 | import sys, time, os
import numpy as np
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
import pycuda.cumath as cumath
#import pycuda.curandom as curandom
from pycuda.elementwise import ElementwiseKernel
from pycuda.reduction import ReductionKernel
import h5py as h5
import matplotlib.pyplot as plt
#Add Modules from other directories
currentDirectory = os.getcwd()
parentDirectory = currentDirectory[:currentDirectory.rfind("/")]
toolsDirectory = parentDirectory + "/tools"
volumeRenderDirectory = parentDirectory + "/volumeRender"
dataDir = "/home/bruno/Desktop/data/qTurbulence/"
sys.path.extend( [toolsDirectory, volumeRenderDirectory] )
from cudaTools import setCudaDevice, getFreeMemory, gpuArray3DtocudaArray, kernelMemoryInfo
from tools import ensureDirectory, printProgressTime
cudaP = "double"
nPoints = 128
useDevice = None
usingAnimation = True
showKernelMemInfo = False
usingGravity = False
for option in sys.argv:
if option == "grav": usingGravity = True
if option == "float": cudaP = "float"
if option == "anim": usingAnimation = True
if option == "mem": showKernelMemInfo = True
if option == "128" or option == "256": nPoints = int(option)
if option.find("dev=") != -1: useDevice = int(option[-1])
precision = {"float":(np.float32, np.complex64), "double":(np.float64,np.complex128) }
cudaPre, cudaPreComplex = precision[cudaP]
#set simulation volume dimensions
nWidth = nPoints
nHeight = nPoints
nDepth = nPoints
nData = nWidth*nHeight*nDepth
Lx = 1.
Ly = 1.
Lz = 1.
xMax, xMin = Lx/2, -Lx/2
yMax, yMin = Ly/2, -Ly/2
zMax, zMin = Lz/2, -Lz/2
dx, dy, dz = Lx/(nWidth-1), Ly/(nHeight-1), Lz/(nDepth-1 )
Z, Y, X = np.mgrid[ zMin:zMax:nDepth*1j, yMin:yMax:nHeight*1j, xMin:xMax:nWidth*1j ]
xPoints = X[0,0,:]
yPoints = Y[0,:,0]
zPoints = Z[0,0,:]
R = np.sqrt( X*X + Y*Y + Z*Z )
sphereR = 0.25
sphereOffCenter = 0.05
sphere = np.sqrt( (X)*(X) + Y*Y + Z*Z ) < 0.2
sphere_left = ( np.sqrt( (X+sphereOffCenter)*(X+sphereOffCenter) + Y*Y + Z*Z ) < sphereR )
sphere_right = ( np.sqrt( (X-sphereOffCenter)*(X-sphereOffCenter) + Y*Y + Z*Z ) < sphereR )
spheres = sphere_right + sphere_left
gamma = 7./5.
c0 = 0.5
#Change precision of the parameters
dx, dy, dz = cudaPre(dx), cudaPre(dy), cudaPre(dz)
Lx, Ly, Lz = cudaPre(Lx), cudaPre(Ly), cudaPre(Lz)
xMin, yMin, zMin = cudaPre(xMin), cudaPre(yMin), cudaPre(zMin)
pi4 = cudaPre( 4*np.pi )
#Initialize openGL
if usingAnimation:
import volumeRender
volumeRender.nWidth = nWidth
volumeRender.nHeight = nHeight
volumeRender.nDepth = nDepth
volumeRender.windowTitle = "Hydro 3D nPoints={0}".format(nPoints)
volumeRender.initGL()
#initialize pyCUDA context
cudaDevice = setCudaDevice( devN=useDevice, usingAnimation=usingAnimation)
#set thread grid for CUDA kernels
block_size_x, block_size_y, block_size_z = 32,4,4 #hardcoded, tune to your needs
gridx = nWidth // block_size_x + 1 * ( nWidth % block_size_x != 0 )
gridy = nHeight // block_size_y + 1 * ( nHeight % block_size_y != 0 )
gridz = nDepth // block_size_z + 1 * ( nDepth % block_size_z != 0 )
block3D = (block_size_x, block_size_y, block_size_z)
grid3D = (gridx, gridy, gridz)
nBlocks3D = grid3D[0]*grid3D[1]*grid3D[2]
grid3D_poisson = (gridx//2, gridy, gridz)
nPointsBlock = block3D[0]*block3D[1]*block3D[2]
nBlocksGrid = gridx * gridy * gridz
block2D = ( 16, 16, 1 )
grid2D = ( nWidth/block2D[0], nHeight/block2D[1], 1 )
print "\nCompiling CUDA code"
cudaCodeFile = open("cuda_hydro_3D.cu","r")
cudaCodeString = cudaCodeFile.read().replace( "cudaP", cudaP )
cudaCodeString = cudaCodeString.replace( "THREADS_PER_BLOCK", str(nPointsBlock) )
#"B_WIDTH":block3D[0], "B_HEIGHT":block3D[1], "B_DEPTH":block3D[2],
#'blockDim.x': block3D[0], 'blockDim.y': block3D[1], 'blockDim.z': block3D[2],
#'gridDim.x': grid3D[0], 'gridDim.y': grid3D[1], 'gridDim.z': grid3D[2] }
cudaCode = SourceModule(cudaCodeString)
#setFlux_kernel = cudaCode.get_function('setFlux')
setInterFlux_hll_kernel = cudaCode.get_function('setInterFlux_hll')
getInterFlux_hll_kernel = cudaCode.get_function('getInterFlux_hll')
iterPoissonStep_kernel = cudaCode.get_function('iterPoissonStep')
getGravityForce_kernel = cudaCode.get_function('getGravityForce')
getBounderyPotential_kernel = cudaCode.get_function('getBounderyPotential')
reduceDensity_kernel = cudaCode.get_function('reduceDensity' )
tex_1 = cudaCode.get_texref("tex_1")
tex_2 = cudaCode.get_texref("tex_2")
tex_3 = cudaCode.get_texref("tex_3")
tex_4 = cudaCode.get_texref("tex_4")
tex_5 = cudaCode.get_texref("tex_5")
surf_1 = cudaCode.get_surfref("surf_1")
surf_2 = cudaCode.get_surfref("surf_2")
surf_3 = cudaCode.get_surfref("surf_3")
surf_4 = cudaCode.get_surfref("surf_4")
surf_5 = cudaCode.get_surfref("surf_5")
########################################################################
convertToUCHAR = ElementwiseKernel(arguments="cudaP normaliztion, cudaP *values, unsigned char *psiUCHAR".replace("cudaP", cudaP),
operation = "psiUCHAR[i] = (unsigned char) ( -255*( values[i]*normaliztion -1 ) );",
name = "sendModuloToUCHAR_kernel")
########################################################################
getTimeMin_kernel = ReductionKernel( np.dtype( cudaPre ),
neutral = "1e6",
arguments=" float delta, cudaP* cnsv_rho, cudaP* cnsv_vel, float* soundVel".replace("cudaP", cudaP),
map_expr = " delta / ( abs( cnsv_vel[i]/ cnsv_rho[i] ) + soundVel[i] ) ",
reduce_expr = "min(a,b)",
name = "getTimeMin_kernel")
###################################################
def timeStepHydro():
for coord in [ 1, 2, 3]:
#Bind textures to read conserved
tex_1.set_array( cnsv1_array )
tex_2.set_array( cnsv2_array )
tex_3.set_array( cnsv3_array )
tex_4.set_array( cnsv4_array )
tex_5.set_array( cnsv5_array )
#Bind surfaces to write inter-cell fluxes
surf_1.set_array( flx1_array )
surf_2.set_array( flx2_array )
surf_3.set_array( flx3_array )
surf_4.set_array( flx4_array )
surf_5.set_array( flx5_array )
setInterFlux_hll_kernel( np.int32( coord ), cudaPre( gamma ), cudaPre(dx), cudaPre(dy), cudaPre(dz), cnsv1_d, cnsv2_d, cnsv3_d, cnsv4_d, cnsv5_d, times_d, grid=grid3D, block=block3D )
if coord == 1:
dt = c0 * gpuarray.min( times_d ).get()
print dt
#Bind textures to read inter-cell fluxes
tex_1.set_array( flx1_array )
tex_2.set_array( flx2_array )
tex_3.set_array( flx3_array )
tex_4.set_array( flx4_array )
tex_5.set_array( flx5_array )
getInterFlux_hll_kernel( np.int32( coord ), cudaPre( dt ), cudaPre( gamma ), cudaPre(dx), cudaPre(dy), cudaPre(dz),
cnsv1_d, cnsv2_d, cnsv3_d, cnsv4_d, cnsv5_d,
gForceX_d, gForceY_d, gForceZ_d, gravWork_d, grid=grid3D, block=block3D )
copy3D_cnsv1()
copy3D_cnsv2()
copy3D_cnsv3()
copy3D_cnsv4()
copy3D_cnsv5()
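# CPU reference sketch of the time-step rule used above (illustrative): the
# getTimeMin_kernel reduction evaluates min( delta / (|v| + c_s) ) over all
# cells and timeStepHydro scales the result by the Courant factor c0.  A NumPy
# equivalent, assuming host-side density, momentum and sound-speed arrays:
def cfl_timestep_reference(delta, rho_host, momentum_host, sound_speed_host, courant=c0):
  velocity = np.abs(momentum_host / rho_host)
  return courant * np.min(delta / (velocity + sound_speed_host))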
########################################################################
def solvePoisson( show=False ):
maxIter = 1000
for n in range(maxIter):
converged.set( one_Array )
tex_1.set_array( phi_array_1 )
surf_1.set_array( phi_array_2 )
iterPoissonStep_kernel( converged, np.int32( 0 ), np.int32( nWidth ), cudaPre( omega ), pi4,
cudaPre( dx ), cudaPre(dy), cudaPre(dz),
cnsv1_d, phi_d, phiWall_l_d, grid=grid3D_poisson, block=block3D )
tex_1.set_array( phi_array_2 )
surf_1.set_array( phi_array_1 )
iterPoissonStep_kernel( converged, np.int32( 1 ), np.int32( nWidth ), cudaPre( omega ), pi4,
cudaPre( dx ), cudaPre(dy), cudaPre(dz),
cnsv1_d, phi_d, phiWall_l_d, grid=grid3D_poisson, block=block3D )
copy3D_phi_1()
copy3D_phi_2()
if converged.get()[0] == 1:
if show: print 'Poisson converged: ', n+1
return
if show: print 'Poisson converged: ', maxIter
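# Host-side sketch of the checkerboard (red-black) SOR update that the two
# iterPoissonStep launches above perform on the GPU for laplacian(phi) = 4*pi*rho,
# assuming a cubic cell of spacing h.  Illustrative only: np.roll imposes
# periodic boundaries here, whereas the kernels use the wall-potential arrays.
def sor_poisson_reference(rho_host, h, omega_relax, n_iter=100):
  phi = np.zeros_like(rho_host)
  parity = np.indices(rho_host.shape).sum(axis=0) % 2
  for _ in range(n_iter):
    for color in (0, 1):
      neighbors = ( np.roll(phi, 1, 0) + np.roll(phi, -1, 0)
                  + np.roll(phi, 1, 1) + np.roll(phi, -1, 1)
                  + np.roll(phi, 1, 2) + np.roll(phi, -1, 2) )
      gauss_seidel = ( neighbors - h*h*4.0*np.pi*rho_host ) / 6.0
      mask = parity == color
      phi[mask] = (1.0 - omega_relax)*phi[mask] + omega_relax*gauss_seidel[mask]
  return phi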
########################################################################
def getGravForce( showConverIter=False):
solvePoisson( show=showConverIter )
tex_1.set_array( phi_array_1 )
getGravityForce_kernel( np.int32( nWidth ), np.int32( nHeight ), np.int32( nDepth ),
dx, dy, dz, gForceX_d, gForceY_d, gForceZ_d,
cnsv1_d, cnsv2_d, cnsv3_d, cnsv4_d, gravWork_d, phiWall_l_d, grid=grid3D, block=block3D )
########################################################################
def stepFuntion():
maxVal = ( gpuarray.max( cnsv1_d ) ).get()
convertToUCHAR( cudaPre( 0.95/maxVal ), cnsv1_d, plotData_d)
copyToScreenArray()
timeStepHydro()
if usingGravity: getGravForce()
########################################################################
if showKernelMemInfo:
#kernelMemoryInfo( setFlux_kernel, 'setFlux_kernel')
#print ""
kernelMemoryInfo( setInterFlux_hll_kernel, 'setInterFlux_hll_kernel')
print ""
kernelMemoryInfo( getInterFlux_hll_kernel, 'getInterFlux_hll_kernel')
print ""
kernelMemoryInfo( iterPoissonStep_kernel, 'iterPoissonStep_kernel')
print ""
kernelMemoryInfo( getBounderyPotential_kernel, 'getBounderyPotential_kernel')
print ""
kernelMemoryInfo( reduceDensity_kernel, 'reduceDensity_kernel')
print ""
########################################################################
########################################################################
print "\nInitializing Data"
initialMemory = getFreeMemory( show=True )
rho = np.zeros( X.shape, dtype=cudaPre ) #density
vx = np.zeros( X.shape, dtype=cudaPre )
vy = np.zeros( X.shape, dtype=cudaPre )
vz = np.zeros( X.shape, dtype=cudaPre )
p = np.zeros( X.shape, dtype=cudaPre ) #pressure
#####################################################
#Initialize a centered sphere
overDensity = sphere
rho[ overDensity ] = 1.
rho[ np.logical_not(overDensity) ] = 0.6
overPresure = sphere
p[ overPresure ] = 10
p[ np.logical_not(overPresure) ] = 1
v2 = vx*vx + vy*vy + vz*vz
#####################################################
#Initialize conserved values
cnsv1_h = rho
cnsv2_h = rho * vx
cnsv3_h = rho * vy
cnsv4_h = rho * vz
cnsv5_h = rho*v2/2. + p/(gamma-1)
#phi_h = np.ones_like( rho )
#phi_h = np.random.rand( nWidth, nHeight, nDepth ).astype( cudaPre )
#phi_h = -1./R.astype( cudaPre )
#phi_h = np.zeros_like( rho )
phi_h = rho
gForce_h = np.zeros_like( rho )
#####################################################
#Initialize device global data
cnsv1_d = gpuarray.to_gpu( cnsv1_h )
cnsv2_d = gpuarray.to_gpu( cnsv2_h )
cnsv3_d = gpuarray.to_gpu( cnsv3_h )
cnsv4_d = gpuarray.to_gpu( cnsv4_h )
cnsv5_d = gpuarray.to_gpu( cnsv5_h )
times_d = gpuarray.to_gpu( np.zeros( X.shape, dtype=np.float32 ) )
#For Gravitational potential
phi_d = gpuarray.to_gpu( phi_h )
omega = 2. / ( 1 + np.pi / nWidth )
one_Array = np.array([ 1 ]).astype( np.int32 )
converged = gpuarray.to_gpu( one_Array )
gForceX_d = gpuarray.to_gpu( gForce_h )
gForceY_d = gpuarray.to_gpu( gForce_h )
gForceZ_d = gpuarray.to_gpu( gForce_h )
gravWork_d = gpuarray.to_gpu( gForce_h )
phiWall_l_d = gpuarray.to_gpu( np.zeros( (nHeight, nDepth), dtype=np.float32 ) )
phiWall_r_d = gpuarray.to_gpu( np.zeros( (nHeight, nDepth), dtype=np.float32 ) )
rhoReduced_d = gpuarray.to_gpu( np.zeros( nBlocksGrid, dtype=np.float32 ) )
blockX_d = gpuarray.to_gpu( np.zeros( nBlocksGrid, dtype=np.float32 ) )
blockY_d = gpuarray.to_gpu( np.zeros( nBlocksGrid, dtype=np.float32 ) )
blockZ_d = gpuarray.to_gpu( np.zeros( nBlocksGrid, dtype=np.float32 ) )
#Texture and surface arrays
cnsv1_array, copy3D_cnsv1 = gpuArray3DtocudaArray( cnsv1_d, allowSurfaceBind=True, precision=cudaP )
cnsv2_array, copy3D_cnsv2 = gpuArray3DtocudaArray( cnsv2_d, allowSurfaceBind=True, precision=cudaP )
cnsv3_array, copy3D_cnsv3 = gpuArray3DtocudaArray( cnsv3_d, allowSurfaceBind=True, precision=cudaP )
cnsv4_array, copy3D_cnsv4 = gpuArray3DtocudaArray( cnsv4_d, allowSurfaceBind=True, precision=cudaP )
cnsv5_array, copy3D_cnsv5 = gpuArray3DtocudaArray( cnsv5_d, allowSurfaceBind=True, precision=cudaP )
flx1_array, copy3D_flx1_1 = gpuArray3DtocudaArray( cnsv1_d, allowSurfaceBind=True, precision=cudaP )
flx2_array, copy3D_flx2_1 = gpuArray3DtocudaArray( cnsv2_d, allowSurfaceBind=True, precision=cudaP )
flx3_array, copy3D_flx3_1 = gpuArray3DtocudaArray( cnsv3_d, allowSurfaceBind=True, precision=cudaP )
flx4_array, copy3D_flx4_1 = gpuArray3DtocudaArray( cnsv4_d, allowSurfaceBind=True, precision=cudaP )
flx5_array, copy3D_flx5_1 = gpuArray3DtocudaArray( cnsv5_d, allowSurfaceBind=True, precision=cudaP )
#Arrays for gravitational potential; checkerboard iteration, 2 arrays
phi_array_1, copy3D_phi_1 = gpuArray3DtocudaArray( phi_d, allowSurfaceBind=True, precision=cudaP )
phi_array_2, copy3D_phi_2 = gpuArray3DtocudaArray( phi_d, allowSurfaceBind=True, precision=cudaP )
if usingAnimation:
plotData_d = gpuarray.to_gpu(np.zeros([nDepth, nHeight, nWidth], dtype = np.uint8))
volumeRender.plotData_dArray, copyToScreenArray = gpuArray3DtocudaArray( plotData_d )
print "Total Global Memory Used: {0:.2f} MB\n".format(float(initialMemory-getFreeMemory( show=False ))/1e6)
if usingGravity:
print 'Getting initial Gravity Force...'
start, end = cuda.Event(), cuda.Event()
start.record() # start timing
getGravForce( showConverIter=True )
end.record(), end.synchronize()
secs = start.time_till( end )*1e-3
print 'Time: {0:0.4f}\n'.format( secs )
#plt.figure( 1 )
#phi = phi_d.get()
#plt.imshow( phi[nDepth/2,:,:], extent=[xMin, xMax, yMin, yMax] )
#plt.colorbar()
##plt.show()
#plt.figure( 2 )
#forceX = gForceX_d.get()
#forceY = gForceY_d.get()
#forceZ = gForceZ_d.get()
#force = np.sqrt( forceX*forceX + forceY*forceY + forceZ*forceZ )
#plt.imshow( force[nDepth/2,:,:], extent=[xMin, xMax, yMin, yMax] )
#plt.colorbar()
#plt.figure( 3 )
#plt.plot( xPoints, phi[nDepth/2,nHeight/2, :] )
#plt.figure( 4 )
#plt.plot( xPoints, forceX[nDepth/2,nHeight/2, :] )
#for i in range(500):
#timeStepHydro()
#if usingGravity: getGravForce()
#getGravForce()
#plt.figure( 5 )
#phi = phi_d.get()
#plt.imshow( phi[nDepth/2,:,:], extent=[xMin, xMax, yMin, yMax] )
#plt.colorbar()
##plt.show()
#plt.figure( 6 )
#forceX = gForceX_d.get()
#forceY = gForceY_d.get()
#forceZ = gForceZ_d.get()
#force = np.sqrt( forceX*forceX + forceY*forceY + forceZ*forceZ )
#plt.imshow( force[nDepth/2,:,:], extent=[xMin, xMax, yMin, yMax] )
#plt.colorbar()
#plt.figure( 7 )
#plt.plot( xPoints, phi[nDepth/2,nHeight/2, :] )
#plt.figure( 8 )
#plt.plot( xPoints, forceX[nDepth/2,nHeight/2, :] )
#plt.show()
#from mpl_toolkits.mplot3d import Axes3D
#x = blockX_d.get()
#y = blockY_d.get()
#z = blockZ_d.get()
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(x, y, z)
#plt.show()
#configure volumeRender functions
if usingAnimation:
#volumeRender.viewTranslation[2] = -2
volumeRender.transferScale = np.float32( 2.8 )
#volumeRender.keyboard = keyboard
#volumeRender.specialKeys = specialKeyboardFunc
volumeRender.stepFunc = stepFuntion
#run volumeRender animation
volumeRender.animate()
| gpl-3.0 |
henningjp/CoolProp | Web/scripts/fluid_properties.REFPROPcomparison.py | 2 | 3508 | from __future__ import print_function
import os.path
import CoolProp, CoolProp.CoolProp as CP
import subprocess
import sys
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
root_dir = os.path.abspath(os.path.join(web_dir, '..'))
fluids_path = os.path.join(web_dir, 'fluid_properties', 'fluids')
plots_path = os.path.join(web_dir, 'fluid_properties', 'fluids', 'REFPROPplots')
template = """
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') # Use a non-GUI backend
import numpy as np, matplotlib.pyplot as plt
import CoolProp
CP = CoolProp.CoolProp
fluid = '{fluid:s}'
fig, ax = plt.subplots()
plt.ylim(10**-18, 10**2)
not_in_REFPROP = False
try:
if CP.get_fluid_param_string(fluid, "REFPROP_name") == 'N/A':
not_in_REFPROP = True
else:
RPfluid = 'REFPROP::' + CP.get_fluid_param_string(fluid, "REFPROP_name")
CAS = CP.get_fluid_param_string(RPfluid, "CAS")
except (RuntimeError,ValueError) as E:
not_in_REFPROP = True
if not_in_REFPROP:
ax.set_xlim(0,1)
xlims = ax.get_xlim()
ylims = ax.get_ylim()
ax.plot([xlims[0],xlims[1]],[ylims[0],ylims[1]],lw = 3,c = 'r')
ax.plot([xlims[0],xlims[1]],[ylims[1],ylims[0]],lw = 3,c = 'r')
x = 0.5
y = (ylims[0]*ylims[1])**0.5
ax.text(x,y,'Not\\nin\\nREFPROP',ha='center',va ='center',bbox = dict(fc = 'white'))
else:
RPfluid = 'REFPROP::' + CP.get_fluid_param_string(fluid, "REFPROP_name")
symbols = ["o", "v", "^", "<", ">","8", "s","p","*","h","H","+","x"]
T = np.min([1.01*CP.PropsSI(fluid, 'Tcrit'),CP.PropsSI(fluid, 'Tmax')])
rhoc = CP.PropsSI(fluid, 'rhomolar_critical')
# Normal properties
rho = np.linspace(1e-10, 2*rhoc)
normalkeys = ['P','V','L','Cpmolar','Cvmolar']
RPdata = CP.PropsSI(normalkeys, 'T', T, 'Dmolar', rho, RPfluid)
CPdata = CP.PropsSI(normalkeys, 'T', T, 'Dmolar', rho, fluid)
for i, key in enumerate(normalkeys):
plt.plot(rho/rhoc, np.abs(RPdata[:,i]/CPdata[:,i]-1)*100, lw = 0, label = key, marker = symbols[i%len(symbols)])
# Special properties
rho = np.linspace(1e-10, 2*rhoc)
keys = ['Hmolar','Smolar']
for i, key in enumerate(keys):
RPdata = CP.PropsSI(key, 'T', T, 'Dmolar', rho, RPfluid) - CP.PropsSI(key, 'T', T, 'Dmolar', 1, RPfluid)
CPdata = CP.PropsSI(key, 'T', T, 'Dmolar', rho, fluid) - CP.PropsSI(key, 'T', T, 'Dmolar', 1, fluid)
plt.plot(rho/rhoc, np.abs(RPdata/CPdata-1)*100, lw = 0, label = key, marker = symbols[(i+len(normalkeys))%len(symbols)])
ax.legend(loc='best', ncol = 2)
plt.xlabel(r'Reduced density [$\\rho/\\rho_c$]')
plt.ylabel(r'Relative deviation $(y_{{CP}}/y_{{RP}}-1)\\times 100$ [%]')
ax.set_yscale('log')
plt.title('Comparison between CoolProp and REFPROP({rpv:s}) along T = 1.01*Tc')
plt.savefig(fluid+'.png', dpi = 100)
plt.savefig(fluid+'.pdf')
plt.close('all')
"""
if not os.path.exists(plots_path):
os.makedirs(plots_path)
with open(os.path.join(plots_path, 'matplotlibrc'), 'w') as fp:
fp.write("backend : agg\n")
for fluid in CoolProp.__fluids__:
print('fluid:', fluid)
file_string = template.format(fluid=fluid, rpv=CP.get_global_param_string("REFPROP_version"))
file_path = os.path.join(plots_path, fluid + '.py')
print('Writing to', file_path)
with open(file_path, 'w') as fp:
fp.write(file_string)
subprocess.check_call('python "' + fluid + '.py"', cwd=plots_path, stdout=sys.stdout, stderr=sys.stderr, shell=True)
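# Single-point spot check (illustrative): the same CoolProp-vs-REFPROP
# comparison can be made directly through PropsSI, without generating the
# per-fluid scripts above.  Water density at 300 K and 1 atm from both
# backends (assumes the same working REFPROP installation the loop relies on).
rho_coolprop = CP.PropsSI('D', 'T', 300, 'P', 101325, 'Water')
rho_refprop = CP.PropsSI('D', 'T', 300, 'P', 101325, 'REFPROP::Water')
print('Water density spot check: CoolProp %.6f vs REFPROP %.6f kg/m3'
      % (rho_coolprop, rho_refprop))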
| mit |
Djabbz/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
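# A short sketch of the failure path, complementing the doctest above: if the
# delegate lacks the method, attribute access itself raises AttributeError,
# which is exactly what makes ``hasattr`` report False on the meta-estimator.
if __name__ == '__main__':
    class _HasNoPredict(object):
        pass

    class _Meta(object):
        def __init__(self, sub_est):
            self.sub_est = sub_est

        @if_delegate_has_method(delegate='sub_est')
        def predict(self, X):
            return self.sub_est.predict(X)

    try:
        _Meta(_HasNoPredict()).predict([[1, 2]])
    except AttributeError:
        pass  # expected: the sub-estimator has no 'predict', so delegation fails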
| bsd-3-clause |
roxana-lafuente/POCs | data_science/MachineLearning-AndrewNg-Coursera/ProgrammingAssignmentI-LinearRegression/sklearn_linear_regression.py | 1 | 1481 | # Author: Roxana Anabel Lafuente
# Following tutorial: https://github.com/jdwittenauer/ipython-notebooks/blob/master/notebooks/ml/ML-Exercise1.ipynb
# Started on: August 2016
import os
import numpy
import pandas
import matplotlib.pyplot
from sklearn import linear_model
# Path to the data set file.
data_path = '/data/ex1data1.txt'
path = os.getcwd() + data_path
if __name__ == "__main__":
# Get data from file.
data = pandas.read_csv(path,
header=None,
names=['Population', 'Profit'])
# Insert ones.
data.insert(0, 'Ones', 1)
# Set X (training data) - Ones and Population.
cols = data.shape[1]
X = data.iloc[:,0:cols-1]
X = numpy.matrix(X.values)
# Set y (target variable) - profit.
y = data.iloc[:,cols-1:cols]
y = numpy.matrix(y.values)
# Sklearn linear regression.
model = linear_model.LinearRegression()
model.fit(X, y)
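    # Cross-check (illustrative): since X already contains a column of ones,
    # the same fit can be obtained in closed form; lstsq solves X * theta ~= y.
    theta = numpy.linalg.lstsq(X, y)[0]  # theta[0] ~ intercept, theta[1] ~ slope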
# Print parameters.
print "Coefficients: ", model.coef_
# Prepare data for printing.
x = numpy.array(X[:, 1].A1)
f = model.predict(X).flatten()
# Plot.
fig, ax = matplotlib.pyplot.subplots(figsize=(12,8))
ax.plot(x, f, 'r', label='Prediction', color='Cyan')
ax.scatter(data.Population, data.Profit, label='Traning Data')
ax.legend(loc=2)
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size')
# Show graphics
matplotlib.pyplot.show() | gpl-3.0 |
aaichsmn/tacc_stats | tacc_stats/analysis/plot/metadatarate.py | 2 | 2047 | from plots import Plot
from matplotlib.figure import Figure
from tacc_stats.analysis.gen import tspl_utils
import numpy
class MetaDataRatePlot(Plot):
k1=['llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', 'llite', 'llite',
'llite', 'llite', 'llite', ]
k2=['open','close','mmap','fsync','setattr',
'truncate','flock','getattr','statfs','alloc_inode',
        'setxattr','listxattr',
'removexattr', 'readdir',
'create','lookup','link','unlink','symlink','mkdir',
'rmdir','mknod','rename',]
def plot(self,jobid,job_data=None):
self.setup(jobid,job_data=job_data)
ts = self.ts
self.fig = Figure(figsize=(10,8),dpi=80)
self.ax=self.fig.add_subplot(1,1,1)
self.ax=[self.ax]
self.fig.subplots_adjust(hspace=0.35)
markers = ('o','x','+','^','s','8','p',
'h','*','D','<','>','v','d','.')
colors = ('b','g','r','c','m','k','y')
tmid=(self.ts.t[:-1]+self.ts.t[1:])/2.0
cnt=0
for v in ts.data:
for host in v:
for vals in v[host]:
rate=numpy.diff(vals)/numpy.diff(ts.t)
c=colors[cnt % len(colors)]
m=markers[cnt % len(markers)]
self.ax[0].plot(tmid/3600., rate, marker=m,
markeredgecolor=c, linestyle='-', color=c,
markerfacecolor='None', label=self.k2[cnt])
self.ax[0].hold=True
cnt=cnt+1
self.ax[0].set_ylabel('Meta Data Rate (op/s)')
tspl_utils.adjust_yaxis_range(self.ax[0],0.1)
handles,labels=self.ax[0].get_legend_handles_labels()
new_handles={}
for h,l in zip(handles,labels):
new_handles[l]=h
box = self.ax[0].get_position()
self.ax[0].set_position([box.x0, box.y0, box.width * 0.9, box.height])
self.ax[0].legend(new_handles.values(),new_handles.keys(),prop={'size':8},
bbox_to_anchor=(1.05,1), borderaxespad=0., loc=2)
self.output('metadata')
| lgpl-2.1 |
simon-pepin/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
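# Quick numeric check (illustrative): training accuracy of the last fitted
# tree on its own standardized feature pair -- a sanity check only, not a
# generalization estimate.
print("Training accuracy on the last feature pair: %.3f" % clf.score(X, y))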
| bsd-3-clause |
OpenWeavers/openanalysis | openanalysis/string_matching.py | 2 | 8301 | from warnings import warn
import numpy as np
import os
import matplotlib.pyplot as plt
from random import randint, randrange
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.mlab import griddata
__all__ = ['StringMatchingAnalyzer', 'StringMatchingAlgorithm']
class StringMatchingAlgorithm:
"""
    Base class for all (Comparison Based) String Matching algorithms
    Increment the number of basic comparisons, 'self.count' in the inner-most
loop of your algorithmic implementation every time the control enters the
loop to obtain correct visualization
"""
def __init__(self, name):
self.name = name
        self.count = 0  # Number of basic comparisons
self.dat = np.array([])
def match(self, text, pattern):
"""
The core matching function
:param text: Source Text
:param pattern: String to be matched
:return: True if pattern in text else False
"""
self.count = 0
pass
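# Minimal example subclass (illustrative): a naive brute-force matcher showing
# where ``self.count`` is meant to be incremented so that the analyzer below
# receives meaningful operation counts.
class NaiveStringMatching(StringMatchingAlgorithm):
    def __init__(self):
        StringMatchingAlgorithm.__init__(self, 'Naive Matching')

    def match(self, text, pattern):
        self.count = 0
        n, m = len(text), len(pattern)
        for i in range(n - m + 1):
            for j in range(m):
                self.count += 1  # one basic character comparison
                if text[i + j] != pattern[j]:
                    break
            else:
                return True  # full match found at shift i
        return False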
class StringMatchingAnalyzer:
__package_directory = os.path.dirname(os.path.abspath(__file__))
__sample_path = os.path.join(__package_directory, 'string_matching_samples') # openanalysis/string_matching_samples
__samples_list = os.listdir(__sample_path)
__min_text_length = 5000
__min_patt_length = 500
# The samples are text files stored in string_matching_samples directory of Current Working
# Directory. You can download the sample tar.gz texts from the SMART website.
# https://www.dmi.unict.it/~faro/smart/download/data/
def __init__(self, matcher):
"""
Constructor for Analyzer
:param matcher: A class which is derived from StringMatchingBase
"""
self.matcher = matcher() # Instantiate
def analyze(self, max_text_length=10000, max_patt_length=1000, progress=True, input_file_path=None):
"""
Analyzes given algorithm by varying both text and pattern length and plots it in 3D space
:param max_text_length: Maximum length of text used in analysis. Should be greater than 5000
:param max_patt_length: Maximum length of pattern used in analysis. Should be greater than 500
:param progress: If True, Progress bar is shown
:param input_file_path: Path to the sample file. Must be larger than 5000 char length. If None, analysis is done with in-built sample
:return: 3D plot of running time vs text and pattern length
"""
# Analyzes the matching algorithm
if max_text_length < self.__min_text_length:
raise ValueError('Minimum text length is {}'.format(self.__min_text_length))
if max_patt_length < self.__min_patt_length:
raise ValueError('Minimum text length is {}'.format(self.__min_patt_length))
if max_text_length < max_patt_length:
raise ValueError(
'Pattern length {} is incompatible with Text Length {}'.format(max_patt_length, max_text_length))
if input_file_path is None:
input_file_path = os.path.join(self.__sample_path,
self.__samples_list[randrange(0, len(self.__samples_list))])
file = open(input_file_path, 'r')
file_text = file.read()
if max_text_length > len(file_text):
raise ValueError('File length {} is smaller than {}'.format(len(file_text), max_text_length))
data_array = []
print('Please wait while analysing {} algorithm'.format(self.matcher.name))
if progress:
import progressbar
count = 0
max_count = (max_text_length - 1000) // 100 * (max_patt_length - 100) // 5
bar = progressbar.ProgressBar(max_value=max_count)
for n in range(1000, max_text_length, 100):
for m in range(100, max_patt_length, 5):
if progress:
bar.update(count)
count += 1
pos = randint(0, len(file_text) - n)
text = file_text[pos:pos + n] # Select a random text of size n
pos = randint(0, len(text) - m)
pattern = text[pos:pos + m] # Select a random pattern of size m from text, where m<n
self.matcher.match(text, pattern) # Run the string matching algorithm with T and P as parameters
data_array.append((n, m, self.matcher.count))
dat = np.array(data_array)
fig = plt.figure()
ax = fig.gca(projection='3d')
x = dat[:, 0]
y = dat[:, 1]
z = dat[:, 2]
xi = np.linspace(min(x), max(x))
yi = np.linspace(min(y), max(y))
X, Y = np.meshgrid(xi, yi)
Z = griddata(x, y, z, xi, yi, interp='linear')
surf = ax.plot_surface(X, Y, Z, rstride=5, cstride=5, cmap=plt.get_cmap('jet'), linewidth=1, antialiased=True)
ax.set_xlabel('length of text $n$')
ax.set_ylabel('length of pattern $m$')
ax.set_zlabel('number of basic operations performed $c$')
plt.suptitle('{0} Analysis\n Sample = {1}'.format(self.matcher.name, os.path.basename(input_file_path)))
ax.set_zlim3d(np.min(Z), np.max(Z))
fig.colorbar(surf)
plt.show()
@staticmethod
def compare(algorithms, n=1000, m=500, maxrun=5, progress=True, input_file_path=None):
"""
Compares the string matching algorithms
:param algorithms: List of String Matching Algorithm classes
        :param n: Text length to be used for comparison
        :param m: Pattern Length to be used for comparison
        :param maxrun: Number of times the test has to be performed. Warns if it is greater than 5
        :param progress: Boolean indicating whether to show Progress bar during comparison, True by default
:param input_file_path: The path of custom file to be used for analysis. If not given, default file is selected from in-built file
:return: Bar charts showing the average of basic operations performed
"""
algorithms = [x() for x in algorithms]
if input_file_path is None:
input_file_path = os.path.join(StringMatchingAnalyzer.__sample_path,
StringMatchingAnalyzer.__samples_list[
randrange(0, len(StringMatchingAnalyzer.__samples_list))])
file = open(input_file_path, 'r')
file_text = file.read()
if n > len(file_text):
raise ValueError('{n} is greater than file text length {l}'.format(n=n, l=len(file_text)))
if m > n:
raise ValueError('Text length {n} is lesser than Pattern Length {m}'.format(n=n, m=m))
if maxrun > 5:
warn('More than 5 loops for testing can take significant amount of time')
operations = {x.name: 0 for x in algorithms}
if progress:
import progressbar
count = 0
max_count = maxrun * len(algorithms)
bar = progressbar.ProgressBar(max_value=max_count)
for i in range(maxrun):
pos = randint(0, len(file_text) - n)
text = file_text[pos:pos + n] # Select a random text of size n
pos = randint(0, len(text) - m)
pattern = text[pos:pos + m] # Select a random pattern of size m from text, where m<n
for algorithm in algorithms:
if progress:
count += 1
bar.update(count)
algorithm.match(text, pattern)
operations[algorithm.name] += algorithm.count
operations = [(k, v / maxrun) for k, v in operations.items()]
plt.suptitle(
            'Comparison of String Matching Algorithms\n n = {}, m = {}\n Averaged over {} loops'.format(n, m, maxrun))
operations = sorted(operations, key=lambda x: x[0])
rects = plt.bar(left=np.arange(len(operations)), height=[y for (x, y) in operations])
plt.xticks(np.arange(len(operations)), [x for (x, y) in operations])
ax = plt.axes()
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%d' % int(height),
ha='center', va='bottom')
plt.show()
| gpl-3.0 |
DGrady/pandas | pandas/core/computation/scope.py | 9 | 9228 | """
Module for scope operations
"""
import sys
import struct
import inspect
import datetime
import itertools
import pprint
import numpy as np
import pandas
import pandas as pd # noqa
from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.core.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
# interpret as a pointer since that's what really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
_DEFAULT_GLOBALS = {
'Timestamp': pandas._libs.lib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
'list': list,
'tuple': tuple,
'inf': np.inf,
'Inf': np.inf,
}
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
target=None):
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self.update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
self.scope = self.scope.new_child((global_dict or
frame.f_globals).copy())
if not isinstance(local_dict, Scope):
self.scope = self.scope.new_child((local_dict or
frame.f_locals).copy())
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __unicode__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
unicode_str = '{name}(scope={scope_keys}, resolvers={res_keys})'
return unicode_str.format(name=type(self).__name__,
scope_keys=scope_keys,
res_keys=res_keys)
@property
def has_resolvers(self):
"""Return whether we have any extra scope.
        For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key, is_local):
"""Resolve a variable name in a possibly local context
Parameters
----------
key : text_type
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
raise compu.ops.UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
"""Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes):
"""Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, 'f_' + scope)
self.scope = self.scope.new_child(d)
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack
def add_tmp(self, value):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__,
num=self.ntemps,
hex_id=_raw_hex_id(self))
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self):
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self):
"""Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
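# Usage sketch (illustrative): the scope machinery above is what lets
# ``DataFrame.query``/``pd.eval`` resolve Python locals referenced with ``@``.
if __name__ == '__main__':
    df = pd.DataFrame({'a': [1, 2, 3]})
    threshold = 1
    # ``threshold`` is found in this frame's locals via Scope.resolve
    print(df.query('a > @threshold'))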
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/plot_kernel_ridge_regression.py | 1 | 6262 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.svm import SVR
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
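# Closed-form illustration (a sketch, not part of the original benchmark):
# kernel ridge regression fits its dual coefficients by solving the linear
# system (K + alpha * I) c = y, which is why its training cost is dominated
# by a single solve.  Shown here on a 100-sample subset with an RBF kernel,
# assuming sklearn's pairwise_kernels helper.
from sklearn.metrics.pairwise import pairwise_kernels

K_demo = pairwise_kernels(X[:100], metric='rbf', gamma=10)
dual_coef_demo = np.linalg.solve(K_demo + 0.1 * np.eye(K_demo.shape[0]),
                                 y[:100])
y_fit_demo = K_demo.dot(dual_coef_demo)  # in-sample predictions from the dual form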
| mit |
PatrickOReilly/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
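def test_components_orthonormality():
    # Additional sanity check (illustrative, in the spirit of the tests
    # above): the rows of components_ are right singular vectors and should
    # be orthonormal for both solvers.
    for algorithm in ("arpack", "randomized"):
        tsvd = TruncatedSVD(n_components=10, algorithm=algorithm,
                            random_state=42)
        tsvd.fit(X)
        gram = np.dot(tsvd.components_, tsvd.components_.T)
        assert_array_almost_equal(gram, np.eye(10))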
| bsd-3-clause |
festeh/BuildingMachineLearningSystemsWithPython | ch09/01_fft_based_classifier.py | 24 | 3740 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_pr, plot_roc, plot_confusion_matrix, GENRE_LIST
from fft import read_fft
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s" % genre_list[label])
scores_to_sort = roc_scores[label]
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
desc = "%s %s" % (name, genre_list[label])
plot_pr(pr_scores[label][median], desc, precisions[label][median],
recalls[label][median], label='%s vs rest' % genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
all_pr_scores = np.asarray(pr_scores.values()).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
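# For reference, the kind of FFT feature vector consumed by train_model above
# can be built from a raw mono signal roughly as sketched below (illustrative;
# the book's create_fft/read_fft helpers may differ in detail, e.g. in the
# number of coefficients kept).
def example_fft_features(signal, n_coeffs=1000):
    return np.abs(np.fft.fft(signal))[:n_coeffs]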
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_fft(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg FFT", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "fft",
"Confusion matrix of an FFT based classifier")
| mit |
probablytom/assessed | Level5/MLM/labs/ae1/debug.py | 1 | 3578 | from model import Model
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# KNN accuracy across fold sizes
knn_model = Model('epi_stroma_data.tsv', '\t')
accuracies = []
knn_model.chosen_classifier = knn_model.KNN
x_range = range(2, 16)
for fold_size in x_range:
results = knn_model.cross_validate(fold_count=fold_size)
accuracies.append(results.count(True)/len(results))
fig, ax = plt.subplots()
ax.plot(x_range, accuracies)
ax.set_xlabel('Number of Folds')
ax.set_ylabel('Accuracy')
plt.savefig('cv_knn.png')
fig.clear()
knn_folds = np.argmax([point
for point in accuracies
if float(point) != 1])
knn_folds = int(knn_folds) # Convert from numpy.float64
print("KNN Folds: " + str(knn_folds))
# Optimise K for KNN at the optimal number of folds
accuracies = []
k_range = range(1, 51)
for k in k_range:
knn_model.K = k
results = knn_model.cross_validate(fold_count=knn_folds)
accuracies.append(results.count(True)/len(results))
fig, ax = plt.subplots()
ax.plot(accuracies)
ax.set_xlabel('Number of Neighbours (K)')
ax.set_ylabel('Accuracy')
plt.savefig('optimal_k.png')
fig.clear()
optimal_k = int(k_range[int(np.argmax(accuracies))])  # argmax gives an index, not a K value
print("Optimal K: " + str(optimal_k))
knn_model.K = optimal_k  # Set K to whatever's best for later experimentation
# GNB accuracy across fold sizes
gnb_model = Model('epi_stroma_data.tsv', '\t')
accuracies = []
gnb_model.chosen_classifier = gnb_model.Bayes
x_range = range(2, 16)
for fold_size in x_range:
results = gnb_model.cross_validate(fold_count=fold_size)
accuracies.append(results.count(True)/len(results))
fig, ax = plt.subplots()
ax.plot(x_range, accuracies)
ax.set_xlabel('Number of Folds')
ax.set_ylabel('Accuracy')
plt.savefig('cv_gnb.png')
fig.clear()
# As above, map the argmax index back onto x_range, masking suspicious perfect scores.
masked_accuracies = [acc if float(acc) != 1 else -1 for acc in accuracies]
gnb_folds = int(x_range[int(np.argmax(masked_accuracies))])
print("GNB folds: " + str(gnb_folds))
# Finding the optimal number of features for knn
accuracies = []
f_range = range(1,25)
for i in f_range:
knn_model.select_features(i)
results = knn_model.cross_validate(fold_count=knn_folds)
accuracies.append(results.count(True)/len(results))
fig, ax = plt.subplots()
ax.plot(f_range, accuracies)
ax.set_xlabel('Number of Features Selected')
ax.set_ylabel('Accuracy')
plt.savefig('knn_optimal_features.png')
fig.clear()
optimal_features_knn = int(f_range[int(np.argmax(accuracies))])  # map index back to a feature count
print("Optimal features for KNN: " + str(optimal_features_knn))
# Finding the optimal number of features for gnb
accuracies = []
f_range = range(1,25)
for i in f_range:
gnb_model.select_features(i)
results = gnb_model.cross_validate(fold_count=gnb_folds)
accuracies.append(results.count(True)/len(results))
fig, ax = plt.subplots()
ax.plot(f_range, accuracies)
ax.set_xlabel('Number of Features Selected')
ax.set_ylabel('Accuracy')
plt.savefig('gnb_optimal_features.png')
fig.clear()
# Finding the optimal number of features for gnb, all features
accuracies = []
f_range = range(1, 113)
for i in f_range:
gnb_model.select_features(i)
results = gnb_model.cross_validate(fold_count=gnb_folds)
accuracies.append(results.count(True)/len(results))
fig, ax = plt.subplots()
ax.plot(f_range, accuracies)
ax.set_xlabel('Number of Features Selected')
ax.set_ylabel('Accuracy')
plt.savefig('gnb_optimal_features__all_features.png')
fig.clear()
optimal_features_gnb = int(f_range[int(np.argmax(accuracies))])  # map index back to a feature count
print("Optimal features for GNB: " + str(optimal_features_gnb))
| artistic-2.0 |
WojciechMigda/TCO-PCFStupskiPrize1 | src/cross_val_ada.py | 1 | 5715 | #!/opt/anaconda2/bin/python
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright (c) 2015 Wojciech Migda
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
# Filename: cross_val_ada.py
#
# Description:
#      AdaBoost(ExtraTrees) regressor cross-validation
#
# Authors:
# Wojciech Migda
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2015-12-26 wm Initial version
#
################################################################################
"""
from __future__ import print_function
DEBUG = False
__all__ = []
__version__ = 0.1
__date__ = '2015-12-26'
__updated__ = '2015-12-26'
from sys import path
path.insert(0, './Pipe')
import pipe as P
def work(
in_y_train_csv,
in_train_feat_csv,
n_ngb,
n_folds):
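    # NOTE: n_ngb is parsed from the command line but is not used below.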
from pypipes import as_csv_rows
y_train = (
in_y_train_csv
| as_csv_rows
| P.select(lambda x: float(x[1]))
| P.as_list
)
X_train = (
in_train_feat_csv
| as_csv_rows
| P.select(lambda l: [float(x) for x in l])
| P.as_list
)
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import ExtraTreesRegressor
#clf = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
# n_estimators=300, random_state=1)
clf = AdaBoostRegressor(ExtraTreesRegressor(verbose=0,
n_estimators=100,
random_state=1,
n_jobs=4),
n_estimators=300, random_state=1,
loss='square')
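    # AdaBoost.R2 over an ExtraTrees base learner: each boosting round refits a
    # 100-tree forest on a reweighted draw of the training data, with sample
    # weights updated using the squared loss.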
def score_gen(n_folds):
from sklearn.cross_validation import KFold
from numpy import array
kf = KFold(len(y_train), n_folds=n_folds)
for itrain, itest in kf:
ytrain = array(y_train)[itrain]
Xtrain = array(X_train)[itrain]
ytest = array(y_train)[itest]
Xtest = array(X_train)[itest]
clf.fit(Xtrain, ytrain)
from sklearn.metrics import mean_squared_error
result = mean_squared_error(clf.predict(Xtest), ytest)
print(result)
yield result
return
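    # Average the per-fold MSEs; the "TCO score" below simply maps a lower
    # cross-validation error to a higher contest score.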
CVscore = sum(score_gen(n_folds)) / n_folds
print("avg score:", CVscore)
print("TCO score:", 1e9 / (1e5 * CVscore + 1.))
pass
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
from sys import argv as Argv
if argv is None:
argv = Argv
pass
else:
Argv.extend(argv)
pass
from os.path import basename
program_name = basename(Argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by Wojciech Migda on %s.
Copyright 2015 Wojciech Migda. All rights reserved.
Licensed under the MIT License
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from argparse import FileType
from sys import stdout
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
#parser.add_argument("-D", "--data-dir",
# type=str, action='store', dest="data_dir", required=True,
# help="directory with input CSV files, BMP 'train' and 'test' subfolders, and where H5 will be stored")
parser.add_argument("--in-y-train-csv",
action='store', dest="in_y_train_csv", required=True,
help="input training data CSV file name with y labels")
parser.add_argument("--in-train-feat-csv",
action='store', dest="in_train_feat_csv", required=True,
help="input X train features data CSV file name")
parser.add_argument("-N", "--n-ngb",
type=int, default=3000, action='store', dest="n_ngb",
help="number of neighbors")
parser.add_argument("-k", "--n-folds",
type=int, default=10, action='store', dest="n_folds",
help="number of folds for cross-validation")
# Process arguments
args = parser.parse_args()
for k, v in args.__dict__.items():
print(str(k) + ' => ' + str(v))
pass
work(
args.in_y_train_csv,
args.in_train_feat_csv,
args.n_ngb,
args.n_folds)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception as e:
if DEBUG:
raise(e)
pass
indent = len(program_name) * " "
from sys import stderr
stderr.write(program_name + ": " + repr(e) + "\n")
stderr.write(indent + " for help use --help")
return 2
pass
if __name__ == "__main__":
if DEBUG:
from sys import argv
argv.append("-h")
pass
from sys import exit as Exit
Exit(main())
pass
| mit |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/model_selection/tests/test_search.py | 23 | 30837 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
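    # Reuse predict for the other prediction interfaces so GridSearchCV's
    # predict_proba/decision_function/transform can be smoke-tested.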
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| unlicense |
huzq/scikit-learn | sklearn/ensemble/_voting.py | 3 | 17256 | """
Soft Voting/Majority Rule classifier and Voting regressor.
This module contains:
- A Soft Voting/Majority Rule classifier for classification estimators.
- A Voting regressor for regression estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>,
# Ramil Nugmanov <[email protected]>
# Mohamed Ali Jamaoui <[email protected]>
#
# License: BSD 3 clause
from abc import abstractmethod
import numpy as np
from joblib import Parallel, delayed
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..base import TransformerMixin
from ..base import clone
from ._base import _fit_single_estimator
from ._base import _BaseHeterogeneousEnsemble
from ..preprocessing import LabelEncoder
from ..utils import Bunch
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..utils.validation import column_or_1d
from ..utils.validation import _deprecate_positional_args
from ..exceptions import NotFittedError
from ..utils._estimator_html_repr import _VisualBlock
class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble):
"""Base class for voting.
Warning: This class should not be used directly. Use derived classes
instead.
"""
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return '(%d of %d) Processing %s' % (idx, total, name)
@property
def _weights_not_none(self):
"""Get the weights of not `None` estimators."""
if self.weights is None:
return None
return [w for est, w in zip(self.estimators, self.weights)
if est[1] != 'drop']
def _predict(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([est.predict(X) for est in self.estimators_]).T
@abstractmethod
def fit(self, X, y, sample_weight=None):
"""Get common fit operations."""
names, clfs = self._validate_estimators()
if (self.weights is not None and
len(self.weights) != len(self.estimators)):
raise ValueError('Number of `estimators` and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(
clone(clf), X, y,
sample_weight=sample_weight,
message_clsname='Voting',
message=self._log_message(names[idx],
idx + 1, len(clfs))
)
for idx, clf in enumerate(clfs) if clf != 'drop'
)
self.named_estimators_ = Bunch()
# Uses 'drop' as placeholder for dropped estimators
est_iter = iter(self.estimators_)
for name, est in self.estimators:
current_est = est if est == 'drop' else next(est_iter)
self.named_estimators_[name] = current_est
return self
@property
def n_features_in_(self):
        # For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute."
.format(self.__class__.__name__)
) from nfe
return self.estimators_[0].n_features_in_
def _sk_visual_block_(self):
names, estimators = zip(*self.estimators)
return _VisualBlock('parallel', estimators, names=names)
class VotingClassifier(ClassifierMixin, _BaseVoting):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'``
using ``set_params``.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
flatten_transform : bool, default=True
Affects shape of transform output only when voting='soft'
If voting='soft' and flatten_transform=True, transform method returns
matrix with shape (n_samples, n_classifiers * n_classes). If
flatten_transform=False, it returns
(n_classifiers, n_samples, n_classes).
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
classes_ : array-like of shape (n_predictions,)
The classes labels.
See Also
--------
VotingRegressor: Prediction voting regressor.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(multi_class='multinomial', random_state=1)
>>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> np.array_equal(eclf1.named_estimators_.lr.predict(X),
... eclf1.named_estimators_['lr'].predict(X))
True
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1],
... flatten_transform=True)
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>> print(eclf3.transform(X).shape)
(6, 6)
"""
@_deprecate_positional_args
def __init__(self, estimators, *, voting='hard', weights=None,
n_jobs=None, flatten_transform=True, verbose=False):
super().__init__(estimators=estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
self.flatten_transform = flatten_transform
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
.. versionadded:: 0.18
Returns
-------
self : object
"""
check_classification_targets(y)
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
self.le_ = LabelEncoder().fit(y)
self.classes_ = self.le_.classes_
transformed_y = self.le_.transform(y)
return super().fit(X, transformed_y, sample_weight)
def predict(self, X):
"""Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
maj : array-like of shape (n_samples,)
Predicted class labels.
"""
check_is_fitted(self)
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
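            # Weighted plurality vote: bincount adds each classifier's weight to
            # the label it predicted; argmax then picks the winning encoded label.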
maj = np.apply_along_axis(
lambda x: np.argmax(
np.bincount(x, weights=self._weights_not_none)),
axis=1, arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
    def _collect_probas(self, X):
        """Collect results from clf.predict_proba calls."""
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting."""
check_is_fitted(self)
avg = np.average(self._collect_probas(X), axis=0,
weights=self._weights_not_none)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
avg : array-like of shape (n_samples, n_classes)
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
probabilities_or_labels
If `voting='soft'` and `flatten_transform=True`:
                returns ndarray of shape (n_samples, n_classifiers *
                n_classes), being class probabilities calculated by each
                classifier.
            If `voting='soft'` and `flatten_transform=False`:
ndarray of shape (n_classifiers, n_samples, n_classes)
If `voting='hard'`:
ndarray of shape (n_samples, n_classifiers), being
class labels predicted by each classifier.
"""
check_is_fitted(self)
if self.voting == 'soft':
probas = self._collect_probas(X)
if not self.flatten_transform:
return probas
return np.hstack(probas)
else:
return self._predict(X)
class VotingRegressor(RegressorMixin, _BaseVoting):
"""Prediction voting regressor for unfitted estimators.
.. versionadded:: 0.21
A voting regressor is an ensemble meta-estimator that fits several base
regressors, each on the whole dataset. Then it averages the individual
predictions to form a final prediction.
Read more in the :ref:`User Guide <voting_regressor>`.
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'`` using
``set_params``.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
weights : array-like of shape (n_regressors,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted values before averaging. Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : Bunch
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
See Also
--------
VotingClassifier: Soft Voting/Majority Rule classifier.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.ensemble import VotingRegressor
>>> r1 = LinearRegression()
>>> r2 = RandomForestRegressor(n_estimators=10, random_state=1)
>>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
>>> y = np.array([2, 6, 12, 20, 30, 42])
>>> er = VotingRegressor([('lr', r1), ('rf', r2)])
>>> print(er.fit(X, y).predict(X))
[ 3.3 5.7 11.8 19.7 28. 40.3]
"""
@_deprecate_positional_args
def __init__(self, estimators, *, weights=None, n_jobs=None,
verbose=False):
super().__init__(estimators=estimators)
self.weights = weights
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
Fitted estimator.
"""
y = column_or_1d(y, warn=True)
return super().fit(X, y, sample_weight)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
return np.average(self._predict(X), axis=1,
weights=self._weights_not_none)
def transform(self, X):
"""Return predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
predictions: ndarray of shape (n_samples, n_classifiers)
Values predicted by each regressor.
"""
check_is_fitted(self)
return self._predict(X)
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/duration/survfunc.py | 2 | 15704 | import numpy as np
import pandas as pd
from scipy.stats.distributions import chi2, norm
from statsmodels.graphics import utils
def _calc_survfunc_right(time, status):
"""
Calculate the survival function and its standard error for a single
group.
"""
time = np.asarray(time)
status = np.asarray(status)
# Convert the unique times to ranks (0, 1, 2, ...)
time, rtime = np.unique(time, return_inverse=True)
# Number of deaths at each unique time.
d = np.bincount(rtime, weights=status)
# Size of risk set just prior to each event time.
n = np.bincount(rtime)
n = np.cumsum(n[::-1])[::-1]
    # Only retain times where an event occurred.
ii = np.flatnonzero(d > 0)
d = d[ii]
n = n[ii]
time = time[ii]
# The survival function probabilities.
sp = 1 - d / n.astype(np.float64)
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
# Standard errors (Greenwood's formula).
se = d / (n * (n - d)).astype(np.float64)
se = np.cumsum(se)
se = np.sqrt(se)
se *= sp
return sp, se, time, n, d
class SurvfuncRight(object):
"""
Estimation and inference for a survival function.
Only right censoring is supported.
Parameters
----------
time : array-like
An array of times (censoring times or event times)
status : array-like
Status at the event time, status==1 is the 'event'
(e.g. death, failure), meaning that the event
occurs at the given value in `time`; status==0
indicates that censoring has occured, meaning that
the event occurs after the given value in `time`.
title : string
Optional title used for plots and summary output.
Attributes
----------
surv_prob : array-like
The estimated value of the survivor function at each time
point in `surv_times`.
surv_prob_se : array-like
The standard errors for the values in `surv_prob`.
surv_times : array-like
The points where the survival function changes.
n_risk : array-like
The number of subjects at risk just before each time value in
`surv_times`.
n_events : array-like
The number of events (e.g. deaths) that occur at each point
in `surv_times`.
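    Examples
    --------
    A minimal sketch on made-up data (the times and censoring indicators
    below are purely illustrative):
    >>> import numpy as np
    >>> time = np.array([3, 5, 6, 6, 8, 10])
    >>> status = np.array([1, 0, 1, 1, 0, 1])
    >>> sf = SurvfuncRight(time, status)
    >>> sf.summary()
    >>> sf.quantile(0.5)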
"""
def __init__(self, time, status, title=None):
self.time = np.asarray(time)
self.status = np.asarray(status)
m = len(status)
x = _calc_survfunc_right(time, status)
self.surv_prob = x[0]
self.surv_prob_se = x[1]
self.surv_times = x[2]
self.n_risk = x[3]
self.n_events = x[4]
self.title = "" if not title else title
def plot(self, ax=None):
"""
Plot the survival function.
Examples
--------
Change the line color:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[0].set_color('purple')
>>> li[1].set_color('purple')
Don't show the censoring points:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[1].set_visible(False)
"""
return plot_survfunc(self, ax)
def quantile(self, p):
"""
Estimated quantile of a survival distribution.
Parameters
----------
p : float
The probability point at which the quantile
is determined.
Returns the estimated quantile.
"""
# SAS uses a strict inequality here.
ii = np.flatnonzero(self.surv_prob < 1 - p)
if len(ii) == 0:
return np.nan
return self.surv_times[ii[0]]
def quantile_ci(self, p, alpha=0.05, method='cloglog'):
"""
Returns a confidence interval for a survival quantile.
Parameters
----------
p : float
The probability point for which a confidence interval is
determined.
alpha : float
The confidence interval has nominal coverage probability
1 - `alpha`.
method : string
            Function to use for the g-transformation; must be one of
            'cloglog' (default), 'linear', 'log', 'logit', or 'asinsqrt'.
Returns
-------
lb : float
The lower confidence limit.
ub : float
The upper confidence limit.
Notes
-----
The confidence interval is obtained by inverting Z-tests. The
limits of the confidence interval will always be observed
event times.
References
----------
The method is based on the approach used in SAS, documented here:
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm
"""
tr = norm.ppf(1 - alpha / 2)
method = method.lower()
if method == "cloglog":
g = lambda x : np.log(-np.log(x))
gprime = lambda x : -1 / (x * np.log(x))
elif method == "linear":
g = lambda x : x
gprime = lambda x : 1
elif method == "log":
g = lambda x : np.log(x)
gprime = lambda x : 1 / x
elif method == "logit":
g = lambda x : np.log(x / (1 - x))
gprime = lambda x : 1 / (x * (1 - x))
elif method == "asinsqrt":
g = lambda x : np.arcsin(np.sqrt(x))
gprime = lambda x : 1 / (2 * np.sqrt(x) * np.sqrt(1 - x))
else:
raise ValueError("unknown method")
r = g(self.surv_prob) - g(1 - p)
r /= (gprime(self.surv_prob) * self.surv_prob_se)
ii = np.flatnonzero(np.abs(r) <= tr)
if len(ii) == 0:
return np.nan, np.nan
lb = self.surv_times[ii[0]]
if ii[-1] == len(self.surv_times) - 1:
ub = np.inf
else:
ub = self.surv_times[ii[-1] + 1]
return lb, ub
def summary(self):
"""
Return a summary of the estimated survival function.
        The summary is a dataframe containing the unique event times,
estimated survival function values, and related quantities.
"""
df = pd.DataFrame(index=self.surv_times)
df.index.name = "Time"
df["Surv prob"] = self.surv_prob
df["Surv prob SE"] = self.surv_prob_se
df["num at risk"] = self.n_risk
df["num events"] = self.n_events
return df
def simultaneous_cb(self, alpha=0.05, method="hw", transform="log"):
"""
Returns a simultaneous confidence band for the survival function.
        Parameters
        ----------
alpha : float
`1 - alpha` is the desired simultaneous coverage
probability for the confidence region. Currently alpha
must be set to 0.05, giving 95% simultaneous intervals.
method : string
The method used to produce the simultaneous confidence
band. Only the Hall-Wellner (hw) method is currently
implemented.
transform : string
            The transform used to produce the interval (note that the returned
interval is on the survival probability scale regardless
of which transform is used). Only `log` and `arcsin` are
implemented.
Returns
-------
lcb : array-like
The lower confidence limits corresponding to the points
in `surv_times`.
ucb : array-like
The upper confidence limits corresponding to the points
in `surv_times`.
"""
method = method.lower()
if method != "hw":
raise ValueError("only the Hall-Wellner (hw) method is implemented")
if alpha != 0.05:
raise ValueError("alpha must be set to 0.05")
transform = transform.lower()
s2 = self.surv_prob_se**2 / self.surv_prob**2
nn = self.n_risk
if transform == "log":
denom = np.sqrt(nn) * np.log(self.surv_prob)
theta = 1.3581 * (1 + nn * s2) / denom
theta = np.exp(theta)
lcb = self.surv_prob**(1/theta)
ucb = self.surv_prob**theta
elif transform == "arcsin":
k = 1.3581
k *= (1 + nn * s2) / (2 * np.sqrt(nn))
k *= np.sqrt(self.surv_prob / (1 - self.surv_prob))
f = np.arcsin(np.sqrt(self.surv_prob))
v = np.clip(f - k, 0, np.inf)
lcb = np.sin(v)**2
v = np.clip(f + k, -np.inf, np.pi/2)
ucb = np.sin(v)**2
else:
raise ValueError("Unknown transform")
return lcb, ucb
def survdiff(time, status, group, weight_type=None, strata=None, **kwargs):
"""
Test for the equality of two survival distributions.
    Parameters
    ----------
time : array-like
The event or censoring times.
status : array-like
The censoring status variable, status=1 indicates that the
        event occurred, status=0 indicates that the observation was
censored.
group : array-like
Indicators of the two groups
weight_type : string
The following weight types are implemented:
None (default) : logrank test
fh : Fleming-Harrington, weights by S^(fh_p),
requires exponent fh_p to be provided as keyword
argument; the weights are derived from S defined at
the previous event time, and the first weight is
always 1.
gb : Gehan-Breslow, weights by the number at risk
tw : Tarone-Ware, weights by the square root of the number
at risk
strata : array-like
Optional stratum indicators for a stratified test
    Returns
    -------
chisq : The chi-square (1 degree of freedom) distributed test
statistic value
pvalue : The p-value for the chi^2 test
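    Examples
    --------
    A minimal sketch comparing two made-up groups (values are purely
    illustrative):
    >>> import numpy as np
    >>> time = np.array([5, 6, 6, 8, 2, 3, 3, 8])
    >>> status = np.array([1, 0, 1, 1, 1, 1, 0, 1])
    >>> group = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    >>> chisq, pvalue = survdiff(time, status, group)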
"""
# TODO: extend to handle more than two groups
time = np.asarray(time)
status = np.asarray(status)
group = np.asarray(group)
gr = np.unique(group)
if len(gr) != 2:
raise ValueError("logrank only supports two groups")
if strata is None:
obs, var = _survdiff(time, status, group, weight_type, gr,
**kwargs)
else:
strata = np.asarray(strata)
stu = np.unique(strata)
obs, var = 0., 0.
for st in stu:
# could be more efficient?
ii = (strata == st)
obs1, var1 = _survdiff(time[ii], status[ii], group[ii],
weight_type, gr, **kwargs)
obs += obs1
var += var1
zstat = obs / np.sqrt(var)
# The chi^2 test statistic and p-value.
chisq = zstat**2
pvalue = 1 - chi2.cdf(chisq, 1)
return chisq, pvalue
def _survdiff(time, status, group, weight_type, gr, **kwargs):
# logrank test for one stratum
ii = (group == gr[0])
time1 = time[ii]
status1 = status[ii]
ii = (group == gr[1])
time2 = time[ii]
status2 = status[ii]
# Get the unique times.
utimes = np.unique(time)
status1 = status1.astype(np.bool)
status2 = status2.astype(np.bool)
# The positions of the observed event times in each group, in the
# overall list of unique times.
ix1 = np.searchsorted(utimes, time1[status1])
ix2 = np.searchsorted(utimes, time2[status2])
# Number of events observed at each time point, per group and
# overall.
obs1 = np.bincount(ix1, minlength=len(utimes))
obs2 = np.bincount(ix2, minlength=len(utimes))
obs = obs1 + obs2
# Risk set size at each time point, per group and overall.
nvec = []
for time0 in time1, time2:
ix = np.searchsorted(utimes, time0)
n = np.bincount(ix, minlength=len(utimes))
n = np.cumsum(n)
n = np.roll(n, 1)
n[0] = 0
n = len(time0) - n
nvec.append(n)
n1, n2 = tuple(nvec)
n = n1 + n2
# The variance of event counts in the first group.
r = n1 / n.astype(np.float64)
var = obs * r * (1 - r) * (n - obs) / (n - 1)
# The expected number of events in the first group.
exp1 = obs * r
weights = None
if weight_type is not None:
weight_type = weight_type.lower()
if weight_type == "gb":
weights = n
elif weight_type == "tw":
weights = np.sqrt(n)
elif weight_type == "fh":
if "fh_p" not in kwargs:
                raise ValueError("weight_type 'fh' requires specification of fh_p")
fh_p = kwargs["fh_p"]
# Calculate the survivor function directly to avoid the
# overhead of creating a SurvfuncRight object
sp = 1 - obs / n.astype(np.float64)
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
weights = sp**fh_p
weights = np.roll(weights, 1)
weights[0] = 1
else:
raise ValueError("weight_type not implemented")
# The Z-scale test statistic (compare to normal reference
# distribution).
ix = np.flatnonzero(n > 1)
if weights is None:
obs = np.sum(obs1[ix] - exp1[ix])
var = np.sum(var[ix])
else:
obs = np.dot(weights[ix], obs1[ix] - exp1[ix])
var = np.dot(weights[ix]**2, var[ix])
return obs, var
def plot_survfunc(survfuncs, ax=None):
"""
Plot one or more survivor functions.
    Parameters
    ----------
survfuncs : object or array-like
        A single SurvfuncRight object, or a list of SurvfuncRight
objects that are plotted together.
Returns
-------
A figure instance on which the plot was drawn.
Examples
--------
Add a legend:
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> leg = fig.legend((ha[0], ha[2]), (lb[0], lb[2]), 'center right')
Change the line colors:
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> ha[0].set_color('purple')
>>> ha[1].set_color('purple')
>>> ha[2].set_color('orange')
>>> ha[3].set_color('orange')
"""
fig, ax = utils.create_mpl_ax(ax)
# If we have only a single survival function to plot, put it into
# a list.
    if isinstance(survfuncs, SurvfuncRight):
        survfuncs = [survfuncs]
for gx, sf in enumerate(survfuncs):
# The estimated survival function does not include a point at
# time 0, include it here for plotting.
surv_times = np.concatenate(([0], sf.surv_times))
surv_prob = np.concatenate(([1], sf.surv_prob))
# If the final times are censoring times they are not included
# in the survival function so we add them here
mxt = max(sf.time)
if mxt > surv_times[-1]:
surv_times = np.concatenate((surv_times, [mxt]))
surv_prob = np.concatenate((surv_prob, [surv_prob[-1]]))
label = getattr(sf, "title", "Group %d" % (gx + 1))
li, = ax.step(surv_times, surv_prob, '-', label=label, lw=2, where='post')
# Plot the censored points.
ii = np.flatnonzero(np.logical_not(sf.status))
ti = sf.time[ii]
jj = np.searchsorted(surv_times, ti) - 1
sp = surv_prob[jj]
ax.plot(ti, sp, '+', ms=12, color=li.get_color(),
label=label + " points")
ax.set_ylim(0, 1.01)
return fig
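# Illustrative usage sketch (comments only): building two survival functions
# and plotting them together; ``time0``, ``status0``, ``time1``, ``status1``
# are hypothetical arrays, and the SurvfuncRight(time, status) constructor is
# assumed from earlier in this module.
#
#     sf0 = SurvfuncRight(time0, status0)
#     sf1 = SurvfuncRight(time1, status1)
#     fig = plot_survfunc([sf0, sf1])
#     fig.savefig("survfunc.png")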
| bsd-3-clause |
samzhang111/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 31 | 50760 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, etc.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
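# Illustrative usage sketch (comments only) for the probability API documented
# above; ``X`` and ``y`` are hypothetical training arrays.
#
#     clf = SGDClassifier(loss="log").fit(X, y)
#     proba = clf.predict_proba(X)          # rows sum to 1, ordered as clf.classes_
#     log_proba = clf.predict_log_proba(X)  # elementwise log of the above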
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
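# Illustrative usage sketch (comments only) of out-of-core fitting with
# ``partial_fit`` as described in the docstrings above; ``batches`` is a
# hypothetical iterable of (X_chunk, y_chunk) pairs.
#
#     reg = SGDRegressor()
#     for X_chunk, y_chunk in batches:
#         reg.partial_fit(X_chunk, y_chunk)
#     y_pred = reg.predict(X_chunk)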
| bsd-3-clause |
vigilv/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
rpetersburg/FiberProperties | scripts/EXPRES_stability.py | 2 | 3703 | import matplotlib.pyplot as plt
import numpy as np
import os
from fiber_properties import FiberImage, image_list, plot_stability, plot_stability_binned
from functools import partial
from multiprocessing import Pool
plt.rc('font', size=32, family='serif')
plt.rc('figure', figsize=[18,18])
plt.rc('xtick', labelsize=32)
plt.rc('ytick', labelsize=32)
plt.rc('text', usetex=True)
plt.rc('lines', lw=4)
NEW_DATA = False
PARALLELIZE = True
PROCESSES = 3
NUM_IMAGES = 150
CAMS = ['nf']
# FOLDER = '../data/stability/2017-03-19 Stability Test/circular_200um/'
FOLDER = '../data/EXPRES/bare_octagonal/stability/'
# FOLDER = '../data/scrambling/2016-08-05 Prototype Core Extension 1/Shift_30/'
NF_METHOD = 'full'
FF_METHOD = 'full'
BIN_SIZE = 10
OPTIONS = {'units': 'microns',
'threshold': 1000,
'kernel': 9,
'radius_tol': .03,
'radius_range': 64,
'center_tol': .03,
'center_range': 64}
class StabilityInfo(object):
def __init__(self):
self.centroid = []
self.center = []
self.x_diff = []
self.y_diff = []
self.diameter = []
self.time = []
def save_objects(i, cam, method):
obj_file = cam + '_obj_' + str(i).zfill(3) + '.pkl'
if obj_file not in os.listdir(FOLDER):
print 'saving ' + cam + '_' + str(i).zfill(3)
im_file = FOLDER + cam + '_' + str(i).zfill(3) + '.fit'
obj = FiberImage(im_file)
obj.set_fiber_center(method=method, **OPTIONS)
obj.set_fiber_centroid(method=method, **OPTIONS)
obj.save_object(FOLDER + obj_file)
if __name__ == "__main__":
data = {}
for cam in CAMS:
data[cam] = StabilityInfo()
if cam == 'in' or cam == 'nf':
method = NF_METHOD
else:
method = FF_METHOD
if NEW_DATA:
if PARALLELIZE:
pool = Pool(processes=PROCESSES)
pool.map(partial(save_objects, cam=cam, method=method),
range(NUM_IMAGES))
else:
for i in xrange(NUM_IMAGES):
save_objects(i, cam, method)
for i in xrange(NUM_IMAGES):
obj_file = cam + '_obj_' + str(i).zfill(3) + '.pkl'
print 'loading ' + cam + '_' + str(i).zfill(3)
obj = FiberImage(FOLDER + obj_file)
data[cam].center.append(obj.get_fiber_center(method=method, **OPTIONS))
data[cam].centroid.append(obj.get_fiber_centroid(method=method, **OPTIONS))
data[cam].x_diff.append(data[cam].centroid[-1].x - data[cam].center[-1].x)
data[cam].y_diff.append(data[cam].centroid[-1].y - data[cam].center[-1].y)
# data[cam].x_diff.append(data[cam].centroid[-1].x)
# data[cam].y_diff.append(data[cam].centroid[-1].y)
data[cam].diameter.append(obj.get_fiber_diameter(method=method, **OPTIONS))
data[cam].time.append(obj.date_time)
obj.save_object(FOLDER + obj_file)
avg_x_diff = np.median(data[cam].x_diff)
avg_y_diff = np.median(data[cam].y_diff)
init_time = np.copy(data[cam].time[0])
for i in xrange(NUM_IMAGES):
data[cam].x_diff[i] -= avg_x_diff
data[cam].y_diff[i] -= avg_y_diff
data[cam].time[i] -= init_time
data[cam].time[i] = data[cam].time[i].total_seconds() / 60.0
plot_stability(data[cam], cam)
plt.savefig(FOLDER + cam + '_stability.png')
plot_stability_binned(data[cam], cam, BIN_SIZE)
plt.savefig(FOLDER + cam + '_stability_binned.png')
| mit |
sgenoud/scikit-learn | sklearn/utils/tests/test_validation.py | 2 | 2708 | """
Tests for input validation functions
"""
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from tempfile import NamedTemporaryFile
from nose.tools import assert_raises, assert_true, assert_false
from .. import (array2d, as_float_array, atleast2d_or_csr, check_arrays,
safe_asarray)
def test_as_float_array():
"""
Test function for as_float_array
"""
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
def test_check_arrays_exceptions():
"""Check that invalid arguments raise appropriate exceptions"""
assert_raises(ValueError, check_arrays, [0], [0, 1])
assert_raises(TypeError, check_arrays, 0, [0, 1])
assert_raises(TypeError, check_arrays, [0], 0)
assert_raises(TypeError, check_arrays, [0, 1], [0, 1], meaning_of_life=42)
assert_raises(ValueError, check_arrays, [0], [0], sparse_format='fake')
def test_np_matrix():
"""
Confirm that input validation code does not return np.matrix
"""
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csr(X), np.matrix))
assert_false(isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix))
assert_false(isinstance(safe_asarray(X), np.matrix))
assert_false(isinstance(safe_asarray(np.matrix(X)), np.matrix))
assert_false(isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix))
def test_memmap():
"""
Confirm that input validation code doesn't copy memory mapped arrays
"""
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (array2d, np.asarray, asflt, safe_asarray):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
| bsd-3-clause |
MartinThoma/algorithms | ML/50-mlps/24-keras-cnn-skip-connection-dropout/main.py | 1 | 3039 | #!/usr/bin/env python
# internal modules
import hasy_tools
import numpy as np
# 3rd party modules
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.layers import (Activation, Add, Conv2D, Dense, Dropout, Flatten,
Input, MaxPooling2D)
from keras.layers.pooling import GlobalAveragePooling2D
from keras.models import Model
from keras.regularizers import l1
from sklearn.model_selection import train_test_split
# Load the data
data = hasy_tools.load_data()
datasets = ['train', 'test']
# One-Hot encoding
for dataset in datasets:
key = 'y_' + dataset
data[key] = np.eye(hasy_tools.n_classes)[data[key].squeeze()]
# Preprocessing
for dataset in datasets:
key = 'x_' + dataset
data[key] = hasy_tools.preprocess(data[key])
# Generate Validation Data
split = train_test_split(data['x_train'], data['y_train'],
test_size=0.20,
random_state=0,
stratify=data['y_train'])
data['x_train'], data['x_val'], data['y_train'], data['y_val'] = split
datasets.append('val')
def skip_layer_conv(x, nb_layers=16):
x1 = Conv2D(nb_layers, (3, 3), padding='same')(x)
x1 = Activation('relu')(x1)
x2 = Conv2D(nb_layers, (3, 3), padding='same')(x1)
x2 = Activation('relu')(x2)
x3 = Add()([x1, x2])
return x3
def skip_layer(x, nb_layers=16):
x1 = Dense(nb_layers, kernel_regularizer=l1(0.01))(x)
x1 = Activation('relu')(x1)
x2 = Dense(nb_layers, kernel_regularizer=l1(0.01))(x1)
x2 = Activation('relu')(x2)
x3 = Add()([x1, x2])
return x3
# Define the model
input_ = Input(shape=(hasy_tools.WIDTH, hasy_tools.HEIGHT, 1))
x = input_
x = Conv2D(16, (3, 3), padding='same',
kernel_initializer='he_uniform')(x)
x = MaxPooling2D(pool_size=(2, 2))(x) # 16x16
x = skip_layer_conv(x)
x = MaxPooling2D(pool_size=(2, 2))(x) # 8x8
x = skip_layer_conv(x)
x = MaxPooling2D(pool_size=(2, 2))(x) # 4x4
x = skip_layer_conv(x)
x = skip_layer_conv(x, 32)
x = Flatten()(x) # Adjust for FCN
x = Dense(512, kernel_regularizer=l1(0.01))(x)
x = Dropout(0.50)(x)
x = Activation('relu')(x)
x = Dense(hasy_tools.n_classes)(x)
x = Activation('softmax')(x)
model = Model(inputs=input_, outputs=x)
# Compile model
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Fit the model
csv_logger = CSVLogger('log.csv', append=True, separator=';')
checkpointer = ModelCheckpoint(filepath='checkpoint.h5',
verbose=1,
period=10,
save_best_only=True)
model.fit(data['x_train'], data['y_train'],
validation_data=(data['x_val'], data['y_val']),
epochs=500,
batch_size=128,
callbacks=[csv_logger, checkpointer])
# Serialize model
model.save('model.h5')
# evaluate the model
scores = model.evaluate(data['x_test'], data['y_test'])
print("\n{}: {:.2f}%".format(model.metrics_names[1], scores[1] * 100))
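# Optional follow-up sketch (comments only): the serialized model above can be
# restored later with Keras' standard loader; the path matches model.save().
#
#     from keras.models import load_model
#     restored = load_model('model.h5')
#     restored.evaluate(data['x_test'], data['y_test'])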
| mit |
pkruskal/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
        labels = model._project_and_cluster(mat, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
RRShieldsCutler/clusterpluck | clusterpluck/tools/cluster_lookup.py | 1 | 9863 | #!/usr/bin/env Python
import argparse
import os
import sys
from collections import defaultdict
from multiprocessing import cpu_count
import pandas as pd
from ninja_dojo.database import RefSeqDatabase
from ninja_dojo.taxonomy import NCBITree
from ninja_utils.parsers import FASTA
from clusterpluck.tools.annotations import refseq_to_name
from clusterpluck.tools.h_clustering import process_hierarchy
from clusterpluck.tools.suppress_print import suppress_stdout
from clusterpluck.wrappers.run_blastp import run_blastp
from clusterpluck.tools.genbank_id_to_tid import genbank_id_to_tid
# The arg parser
def make_arg_parser():
parser = argparse.ArgumentParser(description='Look up relevant information for given OFU(s), such as DNA and AA sequences, and organism names')
parser.add_argument('-s', '--scores', help="The appropriate scores matrix resource for this data (csv)", default='-')
parser.add_argument('-t', '--height', help='The similarity/identity at which the OFUs were picked (0-100)', required=True, type=float)
parser.add_argument('-m', '--mpfa', help='The .mpfa resource for this ClusterPluck database', required=False)
parser.add_argument('--mibig', help='To search OFU sequences against the MIBiG database, provide path to the MIBiG blastp database files (including database name, no extension)', required=False)
parser.add_argument('-d', '--dna_fasta', help='The multi-fasta resource containing cluster DNA sequences for this ClusterPluck database', required=False)
parser.add_argument('-b', '--bread', help='Where to find the header for the sequence (default="ref|,|")', default='ref|,|')
parser.add_argument('-o', '--output', help='Directory in which to save the cluster information files (default = cwd)', required=False, default='.')
parser.add_argument('-c', '--ofu', help='Comma-separated list of the ofus (e.g. ofu00001,ofu00003) on which to provide information', required=False, type=str)
parser.add_argument('-n', '--name', help='Comma-separated list of the RefSeq IDs (with or without cluster number) for which to provide a list of OFUs', required=False, type=str)
parser.add_argument('-y', '--types', help='A CSV file containing the predicted product types for each cluster', required=False)
parser.add_argument('--method', help='The clustering linkage method to use (default = average)',
required=False, default='average')
parser.add_argument('--nt_cat', help='Path to the nt catalog, required for genbank ID clusters (i.e. antismash DB)', required=False, default='-')
parser.add_argument('--homologs', help='Take the reported OFU from an identifier query and then get the clusters in that OFU',
action='store_true', required=False, default='false')
return parser
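# Illustrative invocation (hypothetical file names, shown only to clarify the flags above):
# python cluster_lookup.py -s scores_matrix.csv -t 70 -c ofu00001,ofu00003 -o ofu_info
# writes one organism/predicted-type table per requested OFU, picked at 70% identity.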
def list_organisms(ofus, hclus, nt_cat, typetable, outpath, cut_h):
bgc_dd = defaultdict(list)
for value, key in hclus.itertuples(index=True):
key = str('%05d' % key)
bgc_dd[key].extend(list([value]))
ofu_list = ofus.split(',')
i = 0
# Preload the Database and Tree
db = RefSeqDatabase()
nt = NCBITree()
for ofu in ofu_list:
ofu = str(ofu)
		if ofu.startswith('ofu_'):
			ofu_n = str(ofu.replace('ofu_', ''))
		elif ofu.startswith('ofu'):
			ofu_n = str(ofu.replace('ofu', ''))
		else:
			ofu_n = ofu
bgcs = bgc_dd[ofu_n]
name_dict = defaultdict(list)
with suppress_stdout():
for bgc in bgcs:
if bgc.startswith('ncbi_tid'):
ncbi_tid = bgc.split('|')[1]
if ncbi_tid == 'na':
name = bgc.split('|')[3]
else:
ncbi_tid = int(ncbi_tid)
name = nt.green_genes_lineage(ncbi_tid, depth=8, depth_force=True)
elif '|genbank|' in bgc:
gbk_id = bgc.split('|')[3].split('_cluster')[0]
if nt_cat == '-':
sys.exit('Genbank ID BGC headers require an NT Catalog for annotation... see --help')
tid, organism = genbank_id_to_tid(gbk_id, nt_cat)
name = organism
else:
refseqid = '_'.join(bgc.split('_')[:2])
name = refseq_to_name(refseqid, db=db, nt=nt)
if typetable is not False:
ctype = typetable.filter(like=bgc, axis=0)
ctype = str(ctype.iloc[0, 0])
else:
ctype = 'NA'
if bgc == name:
name_dict[bgc] = [ctype, refseqid]
else:
name_dict[bgc] = [ctype, name]
ofu_file = ''.join(['ofu', ofu_n, '_id', cut_h, '.txt'])
with open(os.path.join(outpath, ofu_file), 'w') as outf:
outdf = pd.DataFrame.from_dict(name_dict, orient='index')
outdf.columns = ['predicted_type', 'organism']
outdf.to_csv(outf, sep='\t')
i += 1
print('\nOrganism information for %d OFUs written to file.\n' % i)
return bgc_dd
def compile_ofu_sequences(inf_m, bgc, aa_outf):
mpfa_gen = FASTA(inf_m)
bgc = str(bgc)
for header, sequence in mpfa_gen.read():
if '.cluster' in header:
header = header.replace('.cluster', '_cluster')
if bgc in header:
aa_outf.write(''.join(['>', header, '\n']))
aa_outf.write(''.join([sequence, '\n']))
return aa_outf
def compile_ofu_dnasequences(inf_d, bgc, dna_outf):
fasta_gen = FASTA(inf_d)
bgc = str(bgc)
for header, sequence in fasta_gen.read():
if '.cluster' in header:
header = header.replace('.cluster', '_cluster')
if bgc in header:
dna_outf.write(''.join(['>', header, '\n']))
dna_outf.write(''.join([sequence, '\n']))
return dna_outf
def identify_organism(org, nt_cat, db, nt):
# with suppress_stdout():
if 'cluster' in org:
if '.cluster' in org:
			org = org.replace('.cluster', '_cluster')
if len(org.split('_')) == 2:
ref_id = org.split('_')[0]
else:
ref_id = '_'.join(org.split('_')[:2])
else:
ref_id = org
if '_' in ref_id:
name = refseq_to_name(ref_id, db=db, nt=nt)
else:
if nt_cat == '-':
sys.exit('Genbank ID BGC headers require an NT Catalog for annotation... see --help')
tid, name = genbank_id_to_tid(ref_id, nt_cat)
return name
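# list_organism_ofus: for each RefSeq/GenBank/MIBiG identifier passed with -n, find
# every OFU whose member clusters contain that identifier and write the assignments
# (one row per resolved organism name) to a tab-separated file.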
def list_organism_ofus(orgs, nt_cat, hclus, height, outpath):
bgc_dd = defaultdict(list)
for value, key in hclus.itertuples(index=True):
key = str('%05d' % key)
key = ''.join(['ofu', key])
bgc_dd[key].extend(list([value]))
orgs_list = orgs.split(',')
i = len(orgs_list)
ofu_dict = defaultdict(list)
# Preload the Database and Tree
db = RefSeqDatabase()
nt = NCBITree()
for org in orgs_list:
# print(org)
org_ofu_dup = []
for ofu_num, ofu_orgs in bgc_dd.items():
for ofu_org in ofu_orgs:
if org in ofu_org:
if org.startswith('BGC'):
name = org
else:
name = identify_organism(org, nt_cat, db=db, nt=nt)
if ofu_num not in org_ofu_dup:
org_ofu_dup.append(ofu_num)
ofu_dict[name].extend([ofu_num])
else:
continue
height = str(height)
ofu_file = ''.join(['OFUs_from_similarity_level', height, '.txt'])
outdf = pd.DataFrame.from_dict(ofu_dict, orient='index')
if not outdf.empty:
with open(os.path.join(outpath, ofu_file), 'w') as outf:
outdf.to_csv(outf, sep='\t', header=False)
		print('\nOFU assignments written to file for the %d organism ID entries given.\n' % i)
else:
print('\nNo OFU assignments found; check RefSeq / Genbank / MIBiG identifier and format')
return ofu_dict
def main():
parser = make_arg_parser()
args = parser.parse_args()
outpath = args.output
method = args.method
nt_cat = args.nt_cat
if not os.path.isdir(outpath):
os.mkdir(outpath)
if not os.path.isdir(outpath):
print('\nError creating output directory; check given path and try again\n')
sys.exit()
with open(args.scores, 'r') as inf:
h = 1 - (args.height / 100)
hclus = process_hierarchy(inf, h, method)
if args.types:
with open(args.types, 'r') as in_t:
typetable = pd.read_csv(in_t, header=0, index_col=0)
else:
typetable = False
if args.ofu:
ofus = args.ofu
cut_h = str(args.height)
bgc_dd = list_organisms(ofus, hclus, nt_cat, typetable, outpath, cut_h)
if args.dna_fasta or args.mpfa: # only generate OFU sequence files if the appropriate files are provided
ofu_list = ofus.split(',')
i = 0
for ofu in ofu_list:
i += 1
				if ofu.startswith('ofu_'):
					ofu_n = str(ofu.replace('ofu_', ''))
				elif ofu.startswith('ofu'):
					ofu_n = str(ofu.replace('ofu', ''))
				else:
					ofu_n = ofu
bgcs = bgc_dd[ofu_n]
if args.mpfa:
ofu_aaseqfile = ''.join(['ofu', ofu_n, '_id', cut_h, '_aasequences.mpfa'])
aa_outf = open(os.path.join(outpath, ofu_aaseqfile), 'w')
for bgc in bgcs:
with open(args.mpfa, 'r') as inf_m:
aa_outf = compile_ofu_sequences(inf_m, bgc, aa_outf)
aa_outf.close()
if args.mibig:
mibigout = ''.join(['ofu', ofu_n, '_vs_MiBiG.txt'])
mibigout = os.path.join(outpath, mibigout)
# print(mibigout)
blastout = open(mibigout, 'w')
cpus = int(cpu_count() / 2)
print('Blasting OFU%s against database using %s cpus\n' % (ofu_n, cpus))
mibig_db = str(os.path.relpath(args.mibig))
# print(mibig_db)
ofu_query = str(os.path.join(outpath, ofu_aaseqfile))
# print(ofu_query)
blastresult = run_blastp(ofu_query, mibigout, mibig_db, cpus)
blastout.write(blastresult)
blastout.close()
if args.dna_fasta:
ofu_dnaseqfile = ''.join(['ofu', ofu_n, '_id', cut_h, '_dnasequences.fna'])
dna_outf = open(os.path.join(outpath, ofu_dnaseqfile), 'w')
for bgc in bgcs:
with open(args.dna_fasta, 'r') as inf_d:
dna_outf = compile_ofu_dnasequences(inf_d, bgc, dna_outf)
dna_outf.close()
print('Sequence files written for %d OFUs.\n' % i)
else:
pass
# If using to look up OFUs from the RefSeq IDs, runs this next bit
if args.name:
orgs = args.name
height = args.height
ofu_dict = list_organism_ofus(orgs, nt_cat, hclus, height, outpath)
if args.homologs:
for key, value in ofu_dict.items():
ofus = ','.join(value)
cut_h = str(args.height)
homolog_dd = list_organisms(ofus, hclus, nt_cat, typetable, outpath, cut_h)
del homolog_dd
sys.exit()
if __name__ == '__main__':
main()
| mit |
lawlietfans/tushare | test/storing_test.py | 40 | 1729 | # -*- coding:utf-8 -*-
import os
from sqlalchemy import create_engine
from pandas.io.pytables import HDFStore
import tushare as ts
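# Each helper below saves tushare data to a different backend: CSV, Excel, HDF5,
# JSON, an appended "big" CSV, MySQL (via SQLAlchemy) and MongoDB.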
def csv():
df = ts.get_hist_data('000875')
df.to_csv('c:/day/000875.csv',columns=['open','high','low','close'])
def xls():
df = ts.get_hist_data('000875')
    # save directly
df.to_excel('c:/day/000875.xlsx', startrow=2,startcol=5)
def hdf():
df = ts.get_hist_data('000875')
# df.to_hdf('c:/day/store.h5','table')
store = HDFStore('c:/day/store.h5')
store['000875'] = df
store.close()
def json():
df = ts.get_hist_data('000875')
df.to_json('c:/day/000875.json',orient='records')
    # or use it directly:
print(df.to_json(orient='records'))
def appends():
filename = 'c:/day/bigfile.csv'
for code in ['000875', '600848', '000981']:
df = ts.get_hist_data(code)
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None)
else:
df.to_csv(filename)
def db():
df = ts.get_tick_data('600848',date='2014-12-22')
engine = create_engine('mysql://root:[email protected]/mystock?charset=utf8')
# db = MySQLdb.connect(host='127.0.0.1',user='root',passwd='jimmy1',db="mystock",charset="utf8")
# df.to_sql('TICK_DATA',con=db,flavor='mysql')
# db.close()
df.to_sql('tick_data',engine,if_exists='append')
def nosql():
import pymongo
import json
conn = pymongo.Connection('127.0.0.1', port=27017)
df = ts.get_tick_data('600848',date='2014-12-22')
print(df.to_json(orient='records'))
conn.db.tickdata.insert(json.loads(df.to_json(orient='records')))
# print conn.db.tickdata.find()
if __name__ == '__main__':
nosql() | bsd-3-clause |
munhyunsu/Hobby | 2018F_SCSCAlgorithm/week5/draw_graph_practice.py | 1 | 1656 | import time
import matplotlib.pyplot as plt
def func_logn(num_loop):
while num_loop > 0:
num_loop = num_loop // 2
def func_n(num_loop):
for index in range(0, num_loop):
pass
def func_n2(num_loop):
for index in range(0, num_loop):
for index2 in range(0, num_loop):
pass
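# The three helpers above are plain O(log N), O(N) and O(N**2) loops; main() times
# each of them for N = 0..100 with time.time() and plots the three curves.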
def main():
    x_data = range(0, 101, 1) # X axis values
    y_data1 = list() # Y axis 1
    y_data2 = list() # Y axis 2
    y_data3 = list() # Y axis 3
    for x in x_data: # loop over the X values
# Y1
start_time = time.time()
func_logn(x)
end_time = time.time()
execution_time = end_time - start_time
y_data1.append(execution_time)
# Y2
start_time = time.time()
func_n(x)
end_time = time.time()
execution_time = end_time - start_time
y_data2.append(execution_time)
# Y3
start_time = time.time()
func_n2(x)
end_time = time.time()
execution_time = end_time - start_time
y_data3.append(execution_time)
    # Draw the chart
plt.plot(x_data, y_data1, 'r.-', label='Log N')
plt.plot(x_data, y_data2, 'g.-', label='N')
plt.plot(x_data, y_data3, 'b.-', label='N**2')
    # Add the legend
plt.legend(bbox_to_anchor=(0.05, 0.95), loc=2, borderaxespad=0.0)
    plt.title('Time Complexity') # title
    plt.ylabel('Execution time (sec)') # Y axis
    plt.xlabel('N') # X axis
    # Save and display the chart
    plt.savefig('time_complexity.png', bbox_inches='tight') # save the chart
    # plt.show() # show the chart
    plt.close() # close the chart
if __name__ == '__main__':
main()
| gpl-3.0 |
MichielCottaar/pymc3 | pymc3/backends/text.py | 8 | 5647 | """Text file trace backend
Store sampling values as CSV files.
File format
-----------
Sampling values for each chain are saved in a separate file (under a
directory specified by the `name` argument). The rows correspond to
sampling iterations. The column names consist of variable names and
index labels. For example, the heading
x,y__0_0,y__0_1,y__1_0,y__1_1,y__2_0,y__2_1
represents two variables, x and y, where x is a scalar and y has a
shape of (3, 2).
"""
from glob import glob
import numpy as np
import os
import pandas as pd
from ..backends import base, ndarray
from . import tracetab as ttab
class Text(base.BaseTrace):
"""Text trace object
Parameters
----------
name : str
Name of directory to store text files
model : Model
If None, the model is taken from the `with` context.
vars : list of variables
Sampling values will be stored for these variables. If None,
`model.unobserved_RVs` is used.
"""
def __init__(self, name, model=None, vars=None):
if not os.path.exists(name):
os.mkdir(name)
super(Text, self).__init__(name, model, vars)
self.flat_names = {v: ttab.create_flat_names(v, shape)
for v, shape in self.var_shapes.items()}
self.filename = None
self._fh = None
self.df = None
## Sampling methods
def setup(self, draws, chain):
"""Perform chain-specific setup.
Parameters
----------
draws : int
Expected number of draws
chain : int
Chain number
"""
self.chain = chain
self.filename = os.path.join(self.name, 'chain-{}.csv'.format(chain))
cnames = [fv for v in self.varnames for fv in self.flat_names[v]]
if os.path.exists(self.filename):
with open(self.filename) as fh:
prev_cnames = next(fh).strip().split(',')
if prev_cnames != cnames:
raise base.BackendError(
"Previous file '{}' has different variables names "
"than current model.".format(self.filename))
self._fh = open(self.filename, 'a')
else:
self._fh = open(self.filename, 'w')
self._fh.write(','.join(cnames) + '\n')
def record(self, point):
"""Record results of a sampling iteration.
Parameters
----------
point : dict
Values mapped to variable names
"""
vals = {}
for varname, value in zip(self.varnames, self.fn(point)):
vals[varname] = value.ravel()
columns = [str(val) for var in self.varnames for val in vals[var]]
self._fh.write(','.join(columns) + '\n')
def close(self):
self._fh.close()
self._fh = None # Avoid serialization issue.
## Selection methods
def _load_df(self):
if self.df is None:
self.df = pd.read_csv(self.filename)
def __len__(self):
if self.filename is None:
return 0
self._load_df()
return self.df.shape[0]
def get_values(self, varname, burn=0, thin=1):
"""Get values from trace.
Parameters
----------
varname : str
burn : int
thin : int
Returns
-------
A NumPy array
"""
self._load_df()
var_df = self.df[self.flat_names[varname]]
shape = (self.df.shape[0],) + self.var_shapes[varname]
vals = var_df.values.ravel().reshape(shape)
return vals[burn::thin]
def _slice(self, idx):
if idx.stop is not None:
raise ValueError('Stop value in slice not supported.')
return ndarray._slice_as_ndarray(self, idx)
def point(self, idx):
"""Return dictionary of point values at `idx` for current chain
with variables names as keys.
"""
idx = int(idx)
self._load_df()
pt = {}
for varname in self.varnames:
vals = self.df[self.flat_names[varname]].iloc[idx]
pt[varname] = vals.reshape(self.var_shapes[varname])
return pt
def load(name, model=None):
"""Load Text database.
Parameters
----------
name : str
Name of directory with files (one per chain)
model : Model
If None, the model is taken from the `with` context.
Returns
-------
A MultiTrace instance
"""
files = glob(os.path.join(name, 'chain-*.csv'))
straces = []
for f in files:
chain = int(os.path.splitext(f)[0].rsplit('-', 1)[1])
strace = Text(name, model=model)
strace.chain = chain
strace.filename = f
straces.append(strace)
return base.MultiTrace(straces)
def dump(name, trace, chains=None):
"""Store values from NDArray trace as CSV files.
Parameters
----------
name : str
Name of directory to store CSV files in
trace : MultiTrace of NDArray traces
Result of MCMC run with default NDArray backend
chains : list
Chains to dump. If None, all chains are dumped.
"""
if not os.path.exists(name):
os.mkdir(name)
if chains is None:
chains = trace.chains
var_shapes = trace._straces[chains[0]].var_shapes
flat_names = {v: ttab.create_flat_names(v, shape)
for v, shape in var_shapes.items()}
for chain in chains:
filename = os.path.join(name, 'chain-{}.csv'.format(chain))
df = ttab.trace_to_dataframe(trace, chains=chain, flat_names=flat_names)
df.to_csv(filename, index=False)
| apache-2.0 |
MartinSavc/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
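# Rough usage sketch of the three helpers above (illustrative only, not executed):
#
#     X = np.random.RandomState(0).rand(20, 20)
#     scaled, row_diag, col_diag = _scale_normalize(X)  # rows/columns scaled independently
#     bistochastic = _bistochastic_normalize(X)         # rows and columns sum to one constant
#     log_interactions = _log_normalize(X)              # Kluger's log-interaction transform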
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
eickenberg/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 13 | 7459 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
plt.show()
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
| bsd-3-clause |
fedebell/Laboratorio3 | Fotoelettrico/subplotparabolico.py | 1 | 4916 | #import uncertainties
#from uncertainties import ufloat
import math
import numpy
import pylab
from scipy.optimize import curve_fit
import scipy.stats
import uncertainties
from uncertainties import ufloat, unumpy
import matplotlib.pyplot as plt
def ff(x, a, b):
return (a*x+b)**2
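# ff is the parabolic model (a*x + b)**2 fitted to each current-vs-stopping-potential
# curve; I_a below is the anodic-current offset estimated by eye from each plot, so
# the fit is performed on I - I_a.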
bucket = numpy.linspace(0,2700, 1000)
# ORANGE
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiAranciocut.txt", unpack = True)
# estimate the anodic current by eye from the plot
I_a = -1.8
# do the fit (parameters in V*s, but presumably the frequencies are in 10**13 Hz...)
popt = (5, 20)
pars, cov = curve_fit(ff, V, I-I_a, popt, dI, absolute_sigma = "true")
#plot
x1 = plt.subplot(321)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 1000)
pylab.ylim(min(I-10),max(I)+10)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Arancio")
pylab.legend(loc = "upper right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1])+I_a, color = "red")
# YELLOW
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiGiallicut.txt", unpack = True)
# estimate the anodic current by eye from the plot
I_a = -2.27
# do the fit (parameters in V*s, but presumably the frequencies are in 10**13 Hz...)
popt = (5, 20)
pars, cov = curve_fit(ff, V, I-I_a, popt, dI, absolute_sigma = "true")
#plot
x2 = plt.subplot(322)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 1000)
pylab.ylim(min(I-10),max(I)+10)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Giallo")
pylab.legend(loc = "upper right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1])+I_a, color = "red")
# GREEN
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiVerdicut.txt", unpack = True)
# estimate the anodic current by eye from the plot
I_a = -2.11
# do the fit (parameters in V*s, but presumably the frequencies are in 10**13 Hz...)
popt = (5, 20)
pars, cov = curve_fit(ff, V, I-I_a, popt, dI, absolute_sigma = "true")
#plot
x3 = plt.subplot(323)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 1000)
pylab.ylim(min(I-10),max(I)+10)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Verde")
pylab.legend(loc = "upper right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1])+I_a, color = "red")
# GREEN-BLUE
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiVerdeAzzurrocut.txt", unpack = True)
# estimate the anodic current by eye from the plot
I_a = -1.488
# do the fit (parameters in V*s, but presumably the frequencies are in 10**13 Hz...)
popt = (5, 20)
pars, cov = curve_fit(ff, V, I-I_a, popt, dI, absolute_sigma = "true")
#plot
x4 = plt.subplot(324)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 1000)
pylab.ylim(min(I-10),max(I)+10)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Verde-Azzurro")
pylab.plot(bucket, ff(bucket, pars[0], pars[1])+I_a, color = "red")
pylab.legend(loc = "upper right")
# LIGHT BLUE
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiAzzurrocut.txt", unpack = True)
# estimate the anodic current by eye from the plot
I_a = -2.30
# do the fit (parameters in V*s, but presumably the frequencies are in 10**13 Hz...)
popt = (5, 20)
pars, cov = curve_fit(ff, V, I-I_a, popt, dI, absolute_sigma = "true")
#plot
x5 = plt.subplot(325)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 1000)
pylab.ylim(min(I-10),max(I)+10)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Azzurro")
pylab.legend(loc = "upper right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1])+I_a, color = "red")
# BLUE
f, V, dV, I, dI = pylab.loadtxt("C:\\Users\\marco\\Desktop\\Laboratorio3\\Fotoelettrico\\datiBlucut.txt", unpack = True)
# estimate the anodic current by eye from the plot
I_a = -0.265
# do the fit (parameters in V*s, but presumably the frequencies are in 10**13 Hz...)
popt = (5, 20)
pars, cov = curve_fit(ff, V, I-I_a, popt, dI, absolute_sigma = "true")
#plot
x6 = plt.subplot(326)
pylab.xlabel("V (mV)", size = "14")
pylab.ylabel("I (nA)", size = "14")
pylab.xlim(0, 1000)
pylab.ylim(min(I-10),max(I)+10)
pylab.grid(color = "gray")
pylab.errorbar(V, I, dI, dV, color = 'blue', marker = 'o', linestyle = '', label = "Blu")
pylab.legend(loc = "upper right")
pylab.plot(bucket, ff(bucket, pars[0], pars[1])+I_a, color = "red")
plt.suptitle('Corrente vs Potenziale di frenamento: fit parabolico', size = 18)
pylab.show() | gpl-3.0 |
ipashchenko/ml4vs | ml4vs/ensemble.py | 1 | 11378 | # -*- coding: utf-8 -*-
import sys
sys.setrecursionlimit(10000)
import pandas as pd
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
import xgboost as xgb
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
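# FixedKerasClassifier patches KerasClassifier so that predict_proba always returns
# two columns (stacking [1 - p, p] when the network has a single sigmoid output, as
# scikit-learn expects) and so that predict returns a flat 1-D array.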
class FixedKerasClassifier(KerasClassifier):
def predict_proba(self, X, **kwargs):
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
probs = self.model.predict_proba(X, **kwargs)
if(probs.shape[1] == 1):
probs = np.hstack([1-probs,probs])
return probs
def predict(self, X, **kwargs):
kwargs = self.filter_sk_params(Sequential.predict, kwargs)
y = self.model.predict(X, **kwargs)
if(y.shape[1] == 1):
y = y[:, 0]
return y
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID']
def shift_log_transform(df, name, shift):
df[name] = np.log(df[name] + shift)
def load_to_df(fnames, names, names_to_delete, target='variable'):
"""
Function that loads data from series of files where first file contains
class of zeros and other files - classes of ones.
:param fnames:
Iterable of file names.
:param names:
Names of columns in files.
:param names_to_delete:
Column names to delete.
:return:
Pandas data frame.
"""
# Load data
dfs = list()
for fn in fnames:
dfs.append(pd.read_table(fn, names=names, engine='python',
na_values='+inf', sep=r"\s*",
usecols=range(30)))
df = pd.concat(dfs)
y = np.zeros(len(df))
y[len(dfs[0]):] = np.ones(len(df) - len(dfs[0]))
df[target] = y
# Remove meaningless features
delta = min(df['CSSD'][np.isfinite(df['CSSD'].values)])
# print delta
print delta
for name in names_to_delete:
del df[name]
try:
shift_log_transform(df, 'CSSD', -delta + 0.1)
except KeyError:
pass
return df, delta
def load_data(fnames, names, names_to_delete):
"""
Function that loads data from series of files where first file contains
class of zeros and other files - classes of ones.
:param fnames:
Iterable of file names.
:param names:
Names of columns in files.
:param names_to_delete:
Column names to delete.
:return:
        X, y - ``sklearn`` arrays of features & responses.
"""
# Load data
dfs = list()
for fn in fnames:
dfs.append(pd.read_table(fn, names=names, engine='python',
na_values='+inf', sep=r"\s*",
usecols=range(30)))
# Remove meaningless features
delta = list()
for df in dfs:
delta.append(df['CSSD'].min())
delta = np.min([d for d in delta if not np.isinf(d)])
print "delta = {}".format(delta)
for df in dfs:
for name in names_to_delete:
del df[name]
try:
shift_log_transform(df, 'CSSD', -delta + 0.1)
except KeyError:
pass
# List of feature names
features_names = list(dfs[0])
# Count number of NaN for each feature
for i, df in enumerate(dfs):
print("File {}".format(i))
for feature in features_names:
print("Feature {} has {} NaNs".format(feature,
df[feature].isnull().sum()))
print("=======================")
# Convert to numpy arrays
# Features
X = list()
for df in dfs:
X.append(np.array(df[list(features_names)].values, dtype=float))
X = np.vstack(X)
# Responses
y = np.zeros(len(X))
y[len(dfs[0]):] = np.ones(len(X) - len(dfs[0]))
df = pd.concat(dfs)
df['variable'] = y
return X, y, df, features_names, delta
def load_data_tgt(fname, names, names_to_delete, delta):
"""
Function that loads target data for classification.
:param fname:
Target data file.
:param names:
Names of columns in files.
:param names_to_delete:
Column names to delete.
:return:
X, ``sklearn`` array of features, list of feature names
"""
# Load data
df = pd.read_table(fname, names=names, engine='python', na_values='+inf',
sep=r"\s*", usecols=range(30))
for name in names_to_delete:
del df[name]
try:
shift_log_transform(df, 'CSSD', -delta + 0.1)
except KeyError:
pass
# List of feature names
features_names = list(df)
# Count number of NaN for each feature
for feature in features_names:
print("Feature {} has {} NaNs".format(feature,
df[feature].isnull().sum()))
print("=======================")
# Convert to numpy arrays
# Features
X = np.array(df[list(features_names)].values, dtype=float)
# Original data
df_orig = pd.read_table(fname, names=names, engine='python', na_values='+inf',
sep=r"\s*", usecols=range(30))
return X, features_names, df, df_orig
import os
# load data
# Load data
data_dir = '/home/ilya/code/ml4vs/data/dataset_OGLE/indexes_normalized'
file_1 = 'vast_lightcurve_statistics_normalized_variables_only.log'
file_0 = 'vast_lightcurve_statistics_normalized_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD']
X, y, df, features_names, delta = load_data([file_0, file_1], names, names_to_delete)
target = 'variable'
predictors = list(df)
predictors.remove(target)
dtrain = df
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.25, random_state=7)
for train_indx, test_indx in sss.split(dtrain[predictors].index, dtrain['variable']):
print train_indx, test_indx
train = dtrain.iloc[train_indx]
valid = dtrain.iloc[test_indx]
clf_gb = xgb.XGBClassifier(n_estimators=100, learning_rate=0.1,
max_depth=9, gamma=0.74, colsample_bylevel=0.72,
colsample_bytree=0.58,
min_child_weight=1,
subsample=0.8)
clf_knn = KNeighborsClassifier(n_neighbors=362, weights='distance', leaf_size=22,
n_jobs=3)
clf_svm = SVC(C=16.5036, class_weight='balanced', probability=True,
gamma=0.09138)
def create_baseline():
# create model
model = Sequential()
model.add(Dense(24, input_dim=24, init='normal', activation='relu',
W_constraint=maxnorm(3)))
model.add(Dropout(0.1))
model.add(Dense(24, init='normal', activation='relu',
W_constraint=maxnorm(3)))
model.add(Dropout(0.1))
model.add(Dense(12, init='normal', activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
learning_rate = 0.1
decay_rate = learning_rate / epochs
momentum = 0.90
sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
nesterov=False)
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics=['accuracy'])
return model
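# Note: create_baseline reads the module-level `epochs` defined just below; this
# works because the function is only called (via KerasClassifier) after `epochs` is bound.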
epochs = 50
# epochs = 125
batch_size = 12
clf_mlp = FixedKerasClassifier(build_fn=create_baseline, nb_epoch=epochs,
batch_size=batch_size, verbose=0)
# calibrated_clf_gb = CalibratedClassifierCV(clf_gb, method='sigmoid', cv=3)
# calibrated_clf_knn = CalibratedClassifierCV(clf_knn, method='sigmoid', cv=3)
# calibrated_clf_mlp = CalibratedClassifierCV(clf_mlp, method='sigmoid', cv=3)
# calibrated_clf_svm = CalibratedClassifierCV(clf_svm, method='sigmoid', cv=3)
# eclf = VotingClassifier(estimators=[('gb', calibrated_clf_gb),
# ('knn', calibrated_clf_knn),
# ('nn', calibrated_clf_mlp),
# ('svm', calibrated_clf_svm)],
# voting='soft', weights=[1, 1, 1, 1], n_jobs=-1)
eclf = VotingClassifier(estimators=[('gb', clf_gb),
('knn', clf_knn),
('nn', clf_mlp),
('svm', clf_svm)],
voting='soft', weights=[2, 1, 1, 1])
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', eclf))
pipeline = Pipeline(estimators)
valid_ = valid[predictors]
train_ = train[predictors]
for name, transform in pipeline.steps[:-1]:
transform.fit(train_)
valid_ = transform.transform(valid_)
train_ = transform.transform(train_)
eclf.fit(train_, train['variable'])
# pred = eclf.predict_proba(valid_)[:, 1]
y_pred = eclf.predict(valid_)
# auc = roc_auc_score(valid['variable'], pred)
recall = recall_score(valid['variable'], y_pred)
pre = precision_score(valid['variable'], y_pred)
print "Pr, Re: {} {}".format(pre, recall)
# print "SCORE:", auc
# Fit full training set
train_ = dtrain[predictors]
for name, transform in pipeline.steps[:-1]:
transform.fit(train_)
train_ = transform.transform(train_)
eclf.fit(train_, dtrain['variable'])
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics_normalized.log'
file_tgt = os.path.join(data_dir, file_tgt)
X, feature_names, df, df_orig = load_data_tgt(file_tgt, names, names_to_delete,
delta)
# Use fitted transformation steps
for name, transform in pipeline.steps[:-1]:
print name, transform
X = transform.transform(X)
y_pred = eclf.predict(X)
# y_probs = eclf.predict_proba(X)
idx = y_pred == 1.
# idx = y_probs[:, 1] > 0.5
# idx_ = y_probs[:, 1] < 0.5
# ens_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
# with open('ens_results.txt', 'w') as fo:
# for line in list(df_orig['star_ID'][idx]):
# fo.write(line + '\n')
| mit |
yangliuy/yangliuy.github.io | markdown_generator/publications.py | 1 | 4690 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD. (Updated 04/21: a helper was added to convert pub_date values like 8/7/2017 to 2017-08-07.)
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
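#
# A hypothetical example row (values are made up purely for illustration;
# fields are tab-separated in the actual file):
#
#   pub_date:  2017-08-07
#   title:     My Paper
#   venue:     Some Conference
#   excerpt:   A short summary.
#   citation:  Doe (2017). "My Paper." Some Conference.
#   url_slug:  my-paper
#   paper_url: http://example.com/my-paper.pdf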
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes the raw text less readable, but it is parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
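# For example (hypothetical input):
#   html_escape('Smith & Jones on "robust" PCA')
#   -> 'Smith &amp; Jones on &quot;robust&quot; PCA'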
def add_pre_zero(month):
month = int(month) # 1-12 or 1-31
if month < 10:
return '0' + str(month)
else:
return str(month)
#transfer pub_date like 8/7/2017 to 2017-08-07
def transfer_pub_date(ori_pub_date):
    tokens = ori_pub_date.split('/')
    year = tokens[2]
    if len(year) == 2:
        # two-digit years are assumed to be 20xx
        year = '20' + year
    month = add_pre_zero(tokens[0])
    day = add_pre_zero(tokens[1])
    new_date = year + '-' + month + '-' + day
    print 'test new_date:', new_date
    return new_date
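# For example: transfer_pub_date('8/7/17') -> '2017-08-07'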
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), comment out the corresponding `md +=` line below.
# In[5]:
import os
for row, item in publications.iterrows():
print 'item: ', item
    # updated 04/21: convert pub_date values like 8/7/2017 to 2017-08-07
new_pub_date = transfer_pub_date(str(item.pub_date))
md_filename = new_pub_date + "-" + item.url_slug + ".md"
html_filename = new_pub_date + "-" + item.url_slug
#year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(new_pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
# if len(str(item.paper_url)) > 5:
# md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download</a>\n"
# if len(str(item.excerpt)) > 5:
# md += "\n" + html_escape(item.excerpt) + "\n"
# md += "\nCitation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
krivenko/som | benchmark/chi/plot.py | 1 | 6392 | from pytriqs.archive import *
from pytriqs.gf.local import *
from pytriqs.plot.mpl_interface import plt, oplot
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from itertools import product
from params import *
arch = HDFArchive(chi_filename,'r')
ed_arch = HDFArchive(chi_ed_filename,'r')
pp = PdfPages('chi.pdf')
spin_names = ('up','dn')
spin_labels = {'up':'\uparrow\uparrow', 'dn':'\downarrow\downarrow'}
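# Each page of chi.pdf overlays the QMC and ED curves for one quantity; the
# text box on each panel reports delta^max, the maximum absolute deviation
# between the corresponding QMC and ED data sets.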
# G_tau
delta_max_text = ""
for sn in spin_names:
g = arch['G_tau'][sn]
g_ed = ed_arch['G_tau'][sn]
oplot(g, mode='R', lw=0.5, label="QMC, $%s$" % spin_labels[sn])
oplot(g_ed, mode='R', lw=0.5, label="ED, $%s$" % spin_labels[sn])
delta_max = np.max(np.abs(g.data[:,0,0] - g_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%s} = %f$\n" % (spin_labels[sn],delta_max)
ax = plt.gca()
ax.set_title('$G(\\tau)$')
ax.set_ylabel('$G(\\tau)$')
ax.set_xlim((0,beta))
ax.set_ylim((-1,0.05))
ax.legend(loc='lower center',prop={'size':10})
ax.text(beta/2,-0.5,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# G_iw
plt.cla()
delta_max_text = ""
for sn in spin_names:
g = arch['G_iw'][sn]
g_ed = ed_arch['G_iw'][sn]
oplot(g, mode='I', lw=0.5, label="QMC, $%s$" % spin_labels[sn])
oplot(g_ed, mode='I', lw=0.5, label="ED, $%s$" % spin_labels[sn])
delta_max = np.max(np.abs(g.data[:,0,0] - g_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%s} = %f$\n" % (spin_labels[sn],delta_max)
ax = plt.gca()
ax.set_title('$G(i\\omega)$')
ax.set_ylabel('$G(i\\omega)$')
ax.set_xlim((0,5.0))
ax.legend(loc='upper center',prop={'size':10})
ax.text(2.5,0.6,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# G_l
plt.cla()
delta_max_text = ""
for sn in spin_names:
g = arch['G_l'][sn]
g_ed = ed_arch['G_l'][sn]
oplot(g, mode='R', lw=0.5, label="QMC, $%s$" % spin_labels[sn])
oplot(g_ed, mode='R', lw=0.5, label="ED, $%s$" % spin_labels[sn])
delta_max = np.max(np.abs(g.data[:,0,0] - g_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%s} = %f$\n" % (spin_labels[sn],delta_max)
ax = plt.gca()
ax.set_title('$G(\\ell)$')
ax.set_ylabel('$G(\\ell)$')
ax.set_xlim((0,n_l-1))
ax.legend(loc='lower center',prop={'size':10})
ax.text(n_l/2,-2,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# chi_tau (diagonal)
plt.cla()
delta_max_text = ""
for s in (0,1):
chi = arch['chi_tau'][s,s]
chi_ed = ed_arch['chi_tau'][s,s]
oplot(chi, mode='R', lw=0.5, label="QMC, %i%i" % (s,s))
oplot(chi_ed, mode='R', lw=0.5, label="ED, %i%i" % (s,s))
delta_max = np.max(np.abs(chi.data[:,0,0] - chi_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%i%i} = %f$\n" % (s,s,delta_max)
ax = plt.gca()
ax.set_title('$\\chi(\\tau)$ (diagonal)')
ax.set_ylabel('$\\chi(\\tau)$')
ax.set_xlim((0,beta))
ax.set_ylim((0,1.0))
ax.legend(loc='upper center',prop={'size':10})
ax.text(beta/2,0.6,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# chi_tau (off-diagonal)
plt.cla()
delta_max_text = ""
for s in (0,1):
chi = arch['chi_tau'][s,1-s]
chi_ed = ed_arch['chi_tau'][s,1-s]
oplot(chi, mode='R', lw=0.5, label="QMC, %i%i" % (s,1-s))
oplot(chi_ed, mode='R', lw=0.5, label="ED, %i%i" % (s,1-s))
delta_max = np.max(np.abs(chi.data[:,0,0] - chi_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%i%i} = %f$\n" % (s,1-s,delta_max)
ax = plt.gca()
ax.set_title('$\\chi(\\tau)$ (off-diagonal)')
ax.set_ylabel('$\\chi(\\tau)$')
ax.set_xlim((0,beta))
ax.set_ylim((0,0.5))
ax.legend(loc='lower center',prop={'size':10})
ax.text(beta/2,0.2,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# chi_iw (diagonal)
plt.cla()
delta_max_text = ""
for s in (0,1):
chi = arch['chi_iw'][s,s]
chi_ed = ed_arch['chi_iw'][s,s]
oplot(chi, mode='R', lw=0.5, label="QMC, %i%i" % (s,s))
oplot(chi_ed, mode='R', lw=0.5, label="ED, %i%i" % (s,s))
delta_max = np.max(np.abs(chi.data[:,0,0] - chi_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%i%i} = %f$\n" % (s,s,delta_max)
ax = plt.gca()
ax.set_title('$\\chi(i\\omega)$ (diagonal)')
ax.set_ylabel('$\\chi(i\\omega)$')
ax.set_xlim((0,5.0))
ax.legend(loc='upper center',prop={'size':10})
ax.text(2.5,ax.get_ylim()[1]*0.6,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# chi_iw (off-diagonal)
plt.cla()
delta_max_text = ""
for s in (0,1):
chi = arch['chi_iw'][s,1-s]
chi_ed = ed_arch['chi_iw'][s,1-s]
oplot(chi, mode='R', lw=0.5, label="QMC, %i%i" % (s,1-s))
oplot(chi_ed, mode='R', lw=0.5, label="ED, %i%i" % (s,1-s))
delta_max = np.max(np.abs(chi.data[:,0,0] - chi_ed.data[:,0,0]))
delta_max_text += "$\\delta^{max}_{%i%i} = %f$\n" % (s,1-s,delta_max)
ax = plt.gca()
ax.set_title('$\\chi(i\\omega)$ (off-diagonal)')
ax.set_ylabel('$\\chi(i\\omega)$')
ax.set_xlim((0,5.0))
ax.legend(loc='upper center',prop={'size':10})
ax.text(2.5,ax.get_ylim()[1]*0.6,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
chi_l_ed = ed_arch['chi_l']
chi_l = chi_l_ed.copy()
chi_l << MatsubaraToLegendre(arch['chi_iw'])
# chi_l (diagonal)
plt.cla()
delta_max_text = ""
for s in (0,1):
oplot(chi_l[s,s], mode='R', lw=0.5, label="QMC, $%i%i$" % (s,s))
oplot(chi_l_ed[s,s], mode='R', lw=0.5, label="ED, $%i%i$" % (s,s))
delta_max = np.max(np.abs(chi_l.data[:,s,s] - chi_l_ed.data[:,s,s]))
delta_max_text += "$\\delta^{max}_{%i%i} = %f$\n" % (s,s,delta_max)
ax = plt.gca()
ax.set_title('$\\chi(\\ell)$ (diagonal)')
ax.set_ylabel('$\\chi(\\ell)$')
ax.set_xlim((0,n_l-1))
ax.legend(loc='upper center',prop={'size':10})
ax.text(n_l/2,ax.get_ylim()[1]*0.6,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
# chi_l (off-diagonal)
plt.cla()
delta_max_text = ""
for s in (0,1):
oplot(chi_l[s,1-s], mode='R', lw=0.5, label="QMC, $%i%i$" % (s,1-s))
oplot(chi_l_ed[s,1-s], mode='R', lw=0.5, label="ED, $%i%i$" % (s,1-s))
delta_max = np.max(np.abs(chi_l.data[:,s,1-s] - chi_l_ed.data[:,s,1-s]))
delta_max_text += "$\\delta^{max}_{%i%i} = %f$\n" % (s,1-s,delta_max)
ax = plt.gca()
ax.set_title('$\\chi(\\ell)$ (off-diagonal)')
ax.set_ylabel('$\\chi(\\ell)$')
ax.set_xlim((0,n_l-1))
ax.legend(loc='upper center',prop={'size':10})
ax.text(n_l/2,ax.get_ylim()[1]*0.6,delta_max_text,horizontalalignment='center')
pp.savefig(plt.gcf())
pp.close()
| gpl-3.0 |
djajetic/AutoML3 | lib/engine_serial.py | 1 | 6677 | #Damir Jajetic, 2015
from sklearn.externals import joblib
from sklearn import linear_model, naive_bayes, neighbors, cross_validation, feature_selection
from sklearn import metrics, ensemble, decomposition, preprocessing, svm, manifold, mixture, neural_network
from sklearn import cross_decomposition, naive_bayes, neighbors, kernel_approximation, random_projection, isotonic
import libscores
import multiprocessing
import time
import shutil
import os
import numpy as np
import data_io
import psutil
import data_converter
import copy
from sklearn.utils import shuffle
from operator import itemgetter
from sklearn.pipeline import Pipeline
from scipy import stats
def worker (sd, srd, Lfold, Lstart, Ltime_budget):
try:
		#this is raw data and it will soon disappear,
		#so these copies must be made before any preprocessing
Y_train = np.copy(sd.LD.data['Y_train'])
X_train = np.copy(sd.LD.data['X_train'])
X_valid = np.copy(sd.LD.data['X_valid'])
X_test = np.copy(sd.LD.data['X_test'])
split = int(len(Y_train)*0.5)
Lnum = -1
for model in [linear_model.LogisticRegression(random_state=101),
ensemble.RandomForestClassifier(n_estimators=16, max_depth=3, random_state=102),
linear_model.LogisticRegression(random_state=103),
ensemble.GradientBoostingClassifier(n_estimators=100, max_depth=4, warm_start=False, random_state=104),
ensemble.GradientBoostingClassifier(n_estimators=100, warm_start=False, learning_rate=0.1, random_state=105),
]:
Lnum += 1
if Ltime_budget < 500 and (time.time() - Lstart) / Ltime_budget > 0.5 and Lnum > 0:
break
if (time.time() - Lstart) / Ltime_budget > 0.8 and Lnum > 0:
break
if psutil.phymem_usage()[2] > 80 and Lnum > 0:
time.sleep(4)
if psutil.phymem_usage()[2] > 90 and Lnum > 0:
destroy_this = some_way #todo
if Lfold < 2:
if Lnum < 1:
print "p1x", Lnum, X_train.shape, Y_train.shape, int(split/5)
model.fit(X_train[:10], Y_train[:10])
print "p11x", Lnum, int(split/5)
model.fit(X_train[:int(split/5)], Y_train[:int(split/5)])
print "p2x", Lnum
else:
model.fit(X_train[:split], Y_train[:split])
if psutil.phymem_usage()[2] > 80 and Lnum > 0:
time.sleep(4)
print "s1", Lnum
if psutil.phymem_usage()[2] > 90 and Lnum > 0:
destroy_this = some_way #todo
preds = model.predict_proba(X_train[split:])
exec('CVscore = libscores.'+ sd.LD.info['metric'] + '(sd.yt_raw[split:], preds)')
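			# Note: the exec-based dispatch above simply looks up the scoring function
			# by name; an equivalent, arguably clearer form would be
			#   CVscore = getattr(libscores, sd.LD.info['metric'])(sd.yt_raw[split:], preds)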
print "s2", Lnum, CVscore
if Lfold==2:
model2=copy.deepcopy(model)
model2.fit(X_train[split:], Y_train[split:])
preds2 = model2.predict_proba(X_train[:split])
exec('CVscore2 = libscores.'+ sd.LD.info['metric'] + '(sd.yt_raw[:split], preds2)')
CVscore = (CVscore + CVscore2)/2
del model2
try:
preds = np.vstack([preds2, preds])
except:
preds = np.hstack([preds2, preds]) #TODO -- proper check for 1D
if Lfold == 4:
splitcv = int(len(Y_train)/4)
def cvfit_predict(xt, xv, yt, yv, model):
model.fit(xt, yt)
predscv = model.predict_proba(xv)
return predscv
cxt = X_train[splitcv:]
cyt = Y_train[splitcv:]
cxv = X_train[:splitcv]
cyv = sd.yt_raw[:splitcv]
cvp1 = cvfit_predict(cxt, cxv, cyt, cyv, model)
exec('cvs1 = libscores.'+ sd.LD.info['metric'] + '(cyv, cvp1)')
cxt = np.vstack([X_train[:splitcv], X_train[splitcv*2:]])
cyt = np.hstack([Y_train[:splitcv], Y_train[splitcv*2:]])
cxv = X_train[splitcv:splitcv*2]
cyv = sd.yt_raw[splitcv:splitcv*2]
cvp2 = cvfit_predict(cxt, cxv, cyt, cyv, model)
exec('cvs2 = libscores.'+ sd.LD.info['metric'] + '(cyv, cvp2)')
cxt = np.vstack([X_train[:splitcv*2], X_train[splitcv*3:]])
cyt = np.hstack([Y_train[:splitcv*2], Y_train[splitcv*3:]])
cxv = X_train[splitcv*2:splitcv*3]
cyv = sd.yt_raw[splitcv*2:splitcv*3]
cvp3 = cvfit_predict(cxt, cxv, cyt, cyv, model)
exec('cvs3 = libscores.'+ sd.LD.info['metric'] + '(cyv, cvp3)')
cxt = X_train[:splitcv*3]
cyt = Y_train[:splitcv*3]
cxv = X_train[splitcv*3:]
cyv = sd.yt_raw[splitcv*3:]
cvp4 = cvfit_predict(cxt, cxv, cyt, cyv, model)
exec('cvs4 = libscores.'+ sd.LD.info['metric'] + '(cyv, cvp4)')
CVscore = (cvs1 + cvs2 + cvs3 + cvs4)/4
try:
cvp = np.vstack([cvp1, cvp2])
cvp = np.vstack([cvp, cvp3])
cvp = np.vstack([cvp, cvp4])
except: #TODO proper 1D
cvp = np.hstack([cvp1, cvp2])
cvp = np.hstack([cvp, cvp3])
cvp = np.hstack([cvp, cvp4])
exec('CVscore2 = libscores.'+ sd.LD.info['metric'] + '(sd.yt_raw, cvp)')
CVscore = (CVscore + CVscore2)/2
preds = cvp
del cvp1
del cvp2
del cvp3
if psutil.phymem_usage()[2] > 80 and Lnum > 0:
time.sleep(4)
if psutil.phymem_usage()[2] > 90 and Lnum > 0:
destroy_this = some_way #todo
if Lnum > 0:
model.fit(X_train, Y_train)
if psutil.phymem_usage()[2] > 80 and Lnum > 0:
time.sleep(4)
if psutil.phymem_usage()[2] > 90 and Lnum > 0:
destroy_this = some_way #todo
preds_valid = model.predict_proba(X_valid)
preds_test = model.predict_proba(X_test)
if Lnum == 0:
wd = srd.raw_model
wd['preds_valid'] = preds_valid
wd['preds_test'] = preds_test
wd['preds_2fld'] = preds
wd['score'] = CVscore * 0.5 #not reilable
wd['done'] = 1
srd.raw_model = wd
print "*********rmodel score = ", CVscore * 0.5
if Lnum == 1:
wd1 = srd.raw_model1
wd1['preds_valid'] = preds_valid
wd1['preds_test'] = preds_test
wd1['preds_2fld'] = preds
wd1['score'] = CVscore
wd1['done'] = 1
srd.raw_model1 = wd1
print "*********rmodel 1 score = ", CVscore
if Lnum == 2:
wd2 = srd.raw_model2
wd2['preds_valid'] = preds_valid
wd2['preds_test'] = preds_test
wd2['preds_2fld'] = preds
wd2['score'] = CVscore
wd2['done'] = 1
srd.raw_model2 = wd2
print "*********rmodel 2 score = ", CVscore
if Lnum == 3:
wd3 = srd.raw_model3
wd3['preds_valid'] = preds_valid
wd3['preds_test'] = preds_test
wd3['preds_2fld'] = preds
wd3['score'] = CVscore
wd3['done'] = 1
srd.raw_model3 = wd3
print "*********rmodel 3 score = ", CVscore
if Lnum == 4:
wd4 = srd.raw_model4
wd4['preds_valid'] = preds_valid
wd4['preds_test'] = preds_test
wd4['preds_2fld'] = preds
wd4['score'] = CVscore
wd4['done'] = 1
srd.raw_model4 = wd4
print "*********rmodel 4 score = ", CVscore
except Exception as e:
print 'exception in serial worker ' + ' ' + str(e)
| mit |
Samneetsingh/OutlierDetection | odt/nearestneighbour/lof.py | 1 | 1830 | ###############################################
## Local Outlier Factor (LOF) Implementation ##
###############################################
### Import Python Libraries ###
import pandas as pd
from pandas import DataFrame
from numpy import array, matrix
### Import R Libraries ###
import rpy2.robjects as R
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
pandas2ri.activate()
base = importr("base")
utils = importr("utils")
odtpackage = importr("dbscan")
######################
## Global LOF Class ##
######################
class LOF(object):
### LOF Class Constructor ###
def __init__(self, xdf, minPts):
self.xdf = xdf
self.minPts = minPts
self.score = []
self.label = []
    ### [TODO:] Implement normalization functionality ###
def normalizeData(self):
pass
    ### LOF Score Estimation Function ###
def LOF(self, xdf):
rdf = pandas2ri.py2ri(xdf)
return odtpackage.lof(base.as_matrix(rdf), self.minPts)
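    ### Note: LOF scores near 1 indicate inliers; scores noticeably greater than 1
    ### indicate points whose local density is lower than that of their neighbours,
    ### which is why getOutlier() uses a threshold around 1 by default.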
### LOF Execution Function ###
def getOutlier(self, threshold=1):
lof = array(self.LOF(self.xdf))
for i in range(0, len(lof)):
self.score.append(lof[i])
if self.score[i] > threshold:
self.label.append('outlier')
else:
self.label.append('normal')
return DataFrame(data={'Score': self.score, 'Label': self.label}, )
if __name__ == "__main__":
url = '/Users/warchief/Documents/Projects/DataRepository/AnomalyDetection/test.csv'
df = DataFrame.from_csv(path=url, header=0, sep=',', index_col=False)
X = df['SL_RRC_CONN_AVG_PER_CELL'].values
Y = df['I_DL_DRB_CELL_TPUT_MBPS'].values
d = {'x': X, 'y': Y}
pdf = DataFrame(data=d)
lf = LOF(pdf, 200)
print lf.getOutlier()
| gpl-3.0 |
petedaws/pyasciidraw | animator.py | 1 | 1514 | from matplotlib import pyplot as plt
from matplotlib import animation
import random
import networkx as nx
import force_directed
import math
fig = plt.figure()
fig.set_dpi(100)
fig.set_size_inches(10, 10)
ax = plt.axes(xlim=(-30, 30), ylim=(-30, 30))
nodes = {}
for i,node in enumerate(range(10)):
node = {}
node['id'] = i
node['label'] = str(i)
node['x'] = 0+0.01*math.cos(i)
node['y'] = 0+0.01*math.sin(i)
nodes[i] = node
edges = []
try:
edges.append((nodes[0],nodes[1]))
edges.append((nodes[1],nodes[2]))
edges.append((nodes[2],nodes[3]))
edges.append((nodes[3],nodes[4]))
edges.append((nodes[4],nodes[5]))
edges.append((nodes[5],nodes[6]))
edges.append((nodes[3],nodes[7]))
edges.append((nodes[5],nodes[8]))
edges.append((nodes[1],nodes[9]))
edges.append((nodes[0],nodes[6]))
edges.append((nodes[0],nodes[3]))
except:
pass
G = nx.Graph()
def init():
G.add_nodes_from(nodes.keys())
G.add_edges_from([(c['id'],d['id']) for c,d in edges])
return []
def animate(i):
force_directed.init_force(nodes)
force_directed.node_repulsion(nodes)
force_directed.edge_attraction(edges)
force_directed.force_limit(nodes)
force_directed.propogate(nodes)
pos_nx = {}
for node,pos in nodes.items():
pos_nx[node] = (pos['x'],pos['y'])
nns = nx.draw_networkx_nodes(G,pos=pos_nx)
ees = nx.draw_networkx_edges(G,pos=pos_nx)
return ees,nns
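# Each animation frame applies one step of the force-directed layout
# (pairwise node repulsion, attraction along edges, a cap on the force
# magnitude, then a position update) before redrawing the graph with networkx.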
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=360,
interval=20,
blit=True)
plt.show()
| mit |
PatrickOReilly/scikit-learn | sklearn/model_selection/_validation.py | 4 | 37132 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import KFold
from ._split import LabelKFold
from ._split import LeaveOneLabelOut
from ._split import LeaveOneOut
from ._split import LeavePLabelOut
from ._split import LeavePOut
from ._split import ShuffleSplit
from ._split import LabelShuffleSplit
from ._split import StratifiedKFold
from ._split import StratifiedShuffleSplit
from ._split import PredefinedSplit
from ._split import check_cv, _safe_split
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
ALL_CVS = {'KFold': KFold,
'LabelKFold': LabelKFold,
'LeaveOneLabelOut': LeaveOneLabelOut,
'LeaveOneOut': LeaveOneOut,
'LeavePLabelOut': LeavePLabelOut,
'LeavePOut': LeavePOut,
'ShuffleSplit': ShuffleSplit,
'LabelShuffleSplit': LabelShuffleSplit,
'StratifiedKFold': StratifiedKFold,
'StratifiedShuffleSplit': StratifiedShuffleSplit,
'PredefinedSplit': PredefinedSplit}
LABEL_CVS = {'LabelKFold': LabelKFold,
'LeaveOneLabelOut': LeaveOneLabelOut,
'LeavePLabelOut': LeavePLabelOut,
'LabelShuffleSplit': LabelShuffleSplit}
def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, labels))
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, labels))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, labels=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, labels, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state),
labels, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
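# A minimal usage sketch (hypothetical data and estimator):
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     iris = load_iris()
#     score, permutation_scores, pvalue = permutation_test_score(
#         SVC(kernel='linear'), iris.data, iris.target, n_permutations=100)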
def _permutation_test_score(estimator, X, y, labels, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, labels):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, labels=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = cv.split(X, y, labels)
# Make a list since we will be iterating multiple times over the folds
cv_iter = list(cv_iter)
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv.split(X, y, labels))
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv_iter
for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
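# A minimal usage sketch (hypothetical estimator and data X, y):
#
#     from sklearn.svm import SVC
#     train_sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel='linear'), X, y,
#         train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#     mean_test_scores = test_scores.mean(axis=1)  # average over CV folds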
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes': %d instead of %d."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, labels=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv.split(X, y, labels) for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
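# A minimal usage sketch (hypothetical estimator and data X, y):
#
#     from sklearn.svm import SVC
#     train_scores, test_scores = validation_curve(
#         SVC(), X, y, param_name="gamma",
#         param_range=np.logspace(-6, -1, 5), cv=5)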
| bsd-3-clause |
cdegroc/scikit-learn | sklearn/decomposition/pca.py | 2 | 17473 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD Style.
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils.extmath import fast_logdet
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import randomized_svd
def _assess_dimension_(spectrum, rank, n_samples, dim):
"""Compute the likelihood of a rank rank dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum spectrum.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
dim: int,
embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > dim:
raise ValueError("the dimension cannot exceed dim")
from scipy.special import gammaln
pu = -rank * np.log(2)
for i in range(rank):
pu += gammaln((dim - i) / 2) - np.log(np.pi) * (dim - i) / 2
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2
if rank == dim:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:dim]) / (dim - rank)
pv = -np.log(v) * n_samples * (dim - rank) / 2
m = dim * rank - rank * (rank + 1) / 2
pp = np.log(2 * np.pi) * (m + rank + 1) / 2
pa = 0
spectrum_ = spectrum.copy()
spectrum_[rank:dim] = v
for i in range(rank):
for j in range(i + 1, dim):
pa += (np.log((spectrum[i] - spectrum[j])
* (1. / spectrum_[j] - 1. / spectrum_[i]))
+ np.log(n_samples))
ll = pu + pl + pv + pp - pa / 2 - rank * np.log(n_samples) / 2
return ll
def _infer_dimension_(spectrum, n, p):
"""This method infers the dimension of a dataset of shape (n, p)
The dataset is described by its spectrum `spectrum`.
"""
ll = []
for rank in range(min(n, p, len(spectrum))):
ll.append(_assess_dimension_(spectrum, rank, n, p))
ll = np.array(ll)
return ll.argmax()
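# Usage sketch (internal helper): for a centred data matrix X of shape (n, p),
# PCA._fit below computes spectrum = S ** 2 / n from the SVD of X and then
# calls _infer_dimension_(spectrum, n, p); the returned rank is the one that
# maximises Minka's approximate log-evidence computed by _assess_dimension_.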
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making there data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print pca.explained_variance_ratio_ # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None, **params):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X, **params)
return self
def fit_transform(self, X, y=None, **params):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X, **params)
U = U[:, :self.n_components]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= np.sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components]
return U
def _fit(self, X):
X = array2d(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = self.explained_variance_ / \
self.explained_variance_.sum()
if self.whiten:
self.components_ = V / S[:, np.newaxis] * np.sqrt(n_samples)
else:
self.components_ = V
if self.n_components == 'mle':
self.n_components = _infer_dimension_(self.explained_variance_,
n_samples, X.shape[1])
elif (self.n_components is not None
and 0 < self.n_components
and self.n_components < 1.0):
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = self.explained_variance_ratio_.cumsum()
self.n_components = np.sum(ratio_cumsum < self.n_components) + 1
if self.n_components is not None:
self.components_ = self.components_[:self.n_components, :]
self.explained_variance_ = \
self.explained_variance_[:self.n_components]
self.explained_variance_ratio_ = \
self.explained_variance_ratio_[:self.n_components]
return (U, S, V)
def transform(self, X):
"""Apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X_transformed = X - self.mean_
X_transformed = np.dot(X_transformed, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
"""
return np.dot(X, self.components_) + self.mean_
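# The whitening caveat noted in ``inverse_transform`` can be checked with a
# small sketch. The helper below is hypothetical usage for illustration only,
# not part of the estimator API above.
def _whitened_roundtrip_is_approximate():
    """Show that transform followed by inverse_transform is only an
    approximate reconstruction when whiten=True, because the per-component
    singular-value scaling is discarded."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    pca = PCA(n_components=3, whiten=True).fit(X)
    X_back = pca.inverse_transform(pca.transform(X))
    # With whiten=False the same round trip recovers X up to float error;
    # with whiten=True the scales differ, so this is generally False.
    return np.allclose(X, X_back)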
class ProbabilisticPCA(PCA):
"""Additional layer on top of PCA that adds a probabilistic evaluation"""
__doc__ += PCA.__doc__
def fit(self, X, y=None, homoscedastic=True):
"""Additionally to PCA.fit, learns a covariance model
Parameters
----------
X : array of shape(n_samples, n_dim)
The data to fit
homoscedastic : bool, optional,
If True, average variance across remaining dimensions
"""
PCA.fit(self, X)
self.dim = X.shape[1]
Xr = X - self.mean_
Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
n_samples = X.shape[0]
if self.dim <= self.n_components:
delta = np.zeros(self.dim)
elif homoscedastic:
delta = (Xr ** 2).sum() * np.ones(self.dim) \
/ (n_samples * self.dim)
else:
delta = (Xr ** 2).mean(0) / (self.dim - self.n_components)
self.covariance_ = np.diag(delta)
for k in range(self.n_components):
add_cov = np.outer(self.components_[k], self.components_[k])
self.covariance_ += self.explained_variance_[k] * add_cov
return self
def score(self, X, y=None):
"""Return a score associated to new data
Parameters
----------
X: array of shape(n_samples, n_dim)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(self.covariance_)
for i in range(X.shape[0]):
log_like[i] = -.5 * np.dot(np.dot(self.precision_, Xr[i]), Xr[i])
log_like += fast_logdet(self.precision_) - \
self.dim / 2 * np.log(2 * np.pi)
return log_like
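# A short sketch of how the probabilistic layer above might be used to
# compare dimensionalities by their average log-likelihood. Hypothetical
# usage only, not part of the original module.
def _ppca_model_comparison_sketch():
    """Return the mean log-likelihood of the data under ProbabilisticPCA
    models with one and with two retained components."""
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    mean_ll = {}
    for k in (1, 2):
        ppca = ProbabilisticPCA(n_components=k).fit(X)
        mean_ll[k] = ppca.score(X).mean()
    return mean_ll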
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
This implementation uses a randomized SVD implementation and can
handle both scipy.sparse and numpy dense arrays as input.
Parameters
----------
n_components : int
Maximum number of components to keep: default is 50.
copy : bool
If False, data passed to fit are overwritten
iterated_power : int, optional
        Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the
        sum of explained variances is equal to 1.0
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=<mtrand.RandomState object at 0x...>, whiten=False)
>>> print pca.explained_variance_ratio_ # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
ProbabilisticPCA
Notes
    -----
**References**:
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.mean_ = None
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X: array-like or scipy.sparse matrix, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self.random_state = check_random_state(self.random_state)
if not hasattr(X, 'todense'):
# not a sparse matrix, ensure this is a 2D array
X = array2d(X)
n_samples = X.shape[0]
if not hasattr(X, 'todense'):
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = randomized_svd(X, self.n_components,
n_iterations=self.iterated_power,
random_state=self.random_state)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = self.explained_variance_ / \
self.explained_variance_.sum()
if self.whiten:
n = X.shape[0]
self.components_ = V / S[:, np.newaxis] * np.sqrt(n)
else:
self.components_ = V
return self
def transform(self, X):
"""Apply the dimensionality reduction on X.
Parameters
----------
X : array-like or scipy.sparse matrix, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
if self.mean_ is not None:
X = X - self.mean_
X = safe_sparse_dot(X, self.components_.T)
return X
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like or scipy.sparse matrix, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
        If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
"""
X_original = safe_sparse_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
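# Sketch of the sparse-input path mentioned in the RandomizedPCA docstring.
# Hypothetical usage; scipy.sparse is assumed to be importable alongside
# this module.
def _randomized_pca_sparse_sketch():
    """Fit RandomizedPCA on a scipy.sparse matrix and project it without
    densifying the input (no mean-centering is applied in the sparse case)."""
    import scipy.sparse as sp
    rng = np.random.RandomState(0)
    X = sp.csr_matrix(rng.randn(30, 10))
    rpca = RandomizedPCA(n_components=3, random_state=0).fit(X)
    return rpca.transform(X).shape  # expected to be (30, 3)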
| bsd-3-clause |
DessimozLab/treeCl | treeCl/utils/silhouette.py | 1 | 5970 | from __future__ import print_function
from __future__ import division
from builtins import object
import numpy as np
import pandas as pd
from ..partition import Partition
class Silhouette(object):
def __init__(self, dm):
self._pvec = None
self.distances = dm
self.groups = None
self.neighbours = None
self.scores = None
@staticmethod
def __get_indices_for_groups_by_index(ix, jx):
if len(ix) == len(jx) == 1 and ix == jx:
return [list(ix)], [list(jx)]
row_indices = [[i for j in jx if i != j] for i in ix]
column_indices = [[j for j in jx if j != i] for i in ix]
return row_indices, column_indices
@staticmethod
def __silhouette_calc(ingroup, outgroup):
if len(ingroup) == 1:
return 0
max_ = np.array([ingroup, outgroup]).max(axis=0)
return (outgroup - ingroup) / max_
def get_indices_for_group(self, group):
return np.where(self.pvec == group)[0]
def get_indices_for_groups(self, group1, group2):
ix = np.where(self.pvec == group1)[0]
jx = np.where(self.pvec == group2)[0]
return self.__get_indices_for_groups_by_index(ix, jx)
def get_mean_dissimilarities_for_group(self, group):
outgroups = self.groups[self.groups != group]
within_indices = self.get_indices_for_groups(group, group)
within_distances = self.distances[within_indices].mean(axis=1)
dissimilarities = []
for outgroup in outgroups:
between_indices = self.get_indices_for_groups(group, outgroup)
between_distances = self.distances[between_indices]
dissimilarities.append(between_distances.mean(axis=1))
return within_distances, np.array(dissimilarities), outgroups
def run(self):
if len(self.groups) == 1:
            raise ValueError("Silhouette is not defined for a partition with a single cluster")
for ingroup in self.groups:
ingroup_ix = self.get_indices_for_group(ingroup)
within, between, outgroups = self.get_mean_dissimilarities_for_group(ingroup)
between_min = between.min(axis=0)
outgroup_ix, neighbours_ix = np.where(between == between_min)
neighbours = np.zeros(neighbours_ix.shape)
neighbours[neighbours_ix] = outgroups[outgroup_ix]
self.neighbours[ingroup_ix] = neighbours
self.scores[ingroup_ix] = self.__silhouette_calc(within, between_min)
@property
def pvec(self):
return self._pvec
@pvec.setter
def pvec(self, partition):
if isinstance(partition, Partition):
self._pvec = np.array(partition.partition_vector)
else:
self._pvec = np.array(partition)
self.groups = np.unique(self._pvec)
self.neighbours = np.zeros(self._pvec.shape)
self.scores = np.zeros(self._pvec.shape)
def __call__(self, partition):
self.pvec = partition
self.run()
return self.neighbours, self.scores
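# Minimal usage sketch for the class above (hypothetical file name and
# partition; see the __main__ block below for a fuller worked example):
#
#   dm = np.loadtxt("distances.txt")
#   s = Silhouette(dm)
#   neighbours, scores = s(Partition((1, 1, 2, 2)))
#   print(scores.mean())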
def add_silhouettes_to_dataframe(path_to_distances, path_to_table, **kwargs):
table = pd.read_csv(path_to_table, **kwargs)
dm = np.loadtxt(path_to_distances)
if __name__ == '__main__':
dm = np.array(
[[0., 0.352, 0.23, 0.713, 0.426, 0.653, 0.481, 0.554, 1.533, 1.549, 1.505, 1.46],
[0.352, 0., 0.249, 0.772, 0.625, 0.909, 0.668, 0.725, 1.613, 1.623, 1.568, 1.523],
[0.23, 0.249, 0., 0.811, 0.417, 0.751, 0.456, 0.52, 1.489, 1.501, 1.446, 1.396],
[0.713, 0.772, 0.811, 0., 0.962, 0.894, 1.025, 1.068, 1.748, 1.782, 1.724, 1.72],
[0.426, 0.625, 0.417, 0.962, 0., 0.644, 0.083, 0.216, 1.424, 1.439, 1.398, 1.339],
[0.653, 0.909, 0.751, 0.894, 0.644, 0., 0.685, 0.659, 1.467, 1.502, 1.448, 1.416],
[0.481, 0.668, 0.456, 1.025, 0.083, 0.685, 0., 0.203, 1.419, 1.432, 1.394, 1.331],
[0.554, 0.725, 0.52, 1.068, 0.216, 0.659, 0.203, 0., 1.503, 1.53, 1.472, 1.416],
[1.533, 1.613, 1.489, 1.748, 1.424, 1.467, 1.419, 1.503, 0., 0.288, 0.299, 0.262],
[1.549, 1.623, 1.501, 1.782, 1.439, 1.502, 1.432, 1.53, 0.288, 0., 0.296, 0.185],
[1.505, 1.568, 1.446, 1.724, 1.398, 1.448, 1.394, 1.472, 0.299, 0.296, 0., 0.197],
[1.46, 1.523, 1.396, 1.72, 1.339, 1.416, 1.331, 1.416, 0.262, 0.185, 0.197, 0.]])
plist = [Partition((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)),
Partition((1, 2, 3, 4, 5, 6, 5, 7, 8, 9, 10, 11)),
Partition((1, 2, 3, 4, 5, 6, 5, 7, 8, 9, 10, 9)),
Partition((1, 2, 1, 3, 4, 5, 4, 6, 7, 8, 9, 8)),
Partition((1, 2, 1, 3, 4, 5, 4, 4, 6, 7, 8, 7)),
Partition((1, 2, 1, 3, 4, 5, 4, 4, 6, 7, 7, 7)),
Partition((1, 2, 1, 3, 4, 5, 4, 4, 6, 6, 6, 6)),
Partition((1, 1, 1, 2, 3, 4, 3, 3, 5, 5, 5, 5)),
Partition((1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 4)),
Partition((1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3)),
Partition((1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2)),
Partition((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))]
s = Silhouette(dm)
skips = 0
for p in plist:
try:
neighbours, scores = s(p)
print("{} clusters: avg score = {}".format(len(p), scores.mean()))
except ValueError:
print("{} clusters: skipping".format(len(p)))
skips += 1
print ("{} tests, {} skipped".format(len(plist), skips))
import treeCl
cl = treeCl.Clustering(dm)
skips = 0
for p in plist:
try:
anosim = cl.anosim(p)
except ValueError:
skips += 1
continue
try:
permanova = cl.permanova(p)
except ValueError:
skips += 1
continue
print ("{} clusters: anosim = {}; permanova = {}".format(len(p), anosim.p_value, permanova.p_value))
print ("{} tests, {} skipped".format(2*len(plist), skips))
| mit |
briehl/narrative | src/biokbase/narrative/viewers.py | 2 | 5271 | import clustergrammer_widget
from clustergrammer_widget.clustergrammer import Network
import pandas as pd
import biokbase.narrative.clients as clients
from biokbase.narrative.app_util import system_variable
def view_as_clustergrammer(
ws_ref, col_categories=(), row_categories=(), normalize_on=None
):
"""
This function returns an interactive clustergrammer widget for a specified object. Data type
must contain a 'data' key with a FloatMatrix2D type value
:param ws_ref: Object workspace reference
:param col_categories: iterable with the permitted factors from the col_attributemapping.
Defaults to all factors, pass None to exclude.
:param row_categories: iterable with the permitted categories from the row_attributemapping.
Defaults to all factors, pass None to exclude.
:param normalize_on: If provided, the matrix will be converted to z-scores normalized on the
'row' or 'column' axis
:return:
"""
assert isinstance(col_categories, (tuple, set, list))
assert isinstance(row_categories, (tuple, set, list))
assert normalize_on in {None, "row", "column"}
generic_df = get_df(ws_ref, col_categories, row_categories, True)
net = Network(clustergrammer_widget)
net.df_to_dat({"mat": generic_df})
if normalize_on:
net.normalize(axis=normalize_on)
net.cluster(enrichrgram=False)
return net.widget()
def get_df(ws_ref, col_attributes=(), row_attributes=(), clustergrammer=False):
"""
Gets a dataframe from the WS object
:param ws_ref: The Workspace reference of the 2DMatrix containing object
:param col_attributes: Which column attributes should appear in the resulting DataFrame as a
multiIndex. Defaults to all attributes, pass None to use a simple index of only ID.
:param row_attributes: Which row attributes should appear in the resulting DataFrame as a
multiIndex. Defaults to all attributes, pass None to use a simple index of only ID.
:param clustergrammer: Returns a DataFrame with Clustergrammer compatible indices and columns.
Defaults to False.
:return: A Pandas DataFrame
"""
ws = clients.get("workspace")
if "/" not in ws_ref:
ws_ref = "{}/{}".format(system_variable("workspace"), ws_ref)
generic_data = ws.get_objects2({"objects": [{"ref": ws_ref}]})["data"][0]["data"]
if not _is_compatible_matrix(generic_data):
raise ValueError(
"{} is not a compatible data type for this viewer. Data type must "
"contain a 'data' key with a FloatMatrix2D type value".format(ws_ref)
)
cols = _get_categories(
generic_data["data"]["col_ids"],
ws_ref,
generic_data.get("col_attributemapping_ref"),
generic_data.get("col_mapping"),
col_attributes,
clustergrammer,
)
rows = _get_categories(
generic_data["data"]["row_ids"],
ws_ref,
generic_data.get("row_attributemapping_ref"),
generic_data.get("row_mapping"),
row_attributes,
clustergrammer,
)
return pd.DataFrame(data=generic_data["data"]["values"], columns=cols, index=rows)
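# Hypothetical usage sketch for the two public helpers above; the workspace
# reference and attribute names are placeholders, not real objects:
#
#   df = get_df("12345/my_matrix", col_attributes=("condition",))
#   widget = view_as_clustergrammer("12345/my_matrix", normalize_on="row")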
def _is_compatible_matrix(obj):
try:
assert "data" in obj
assert "col_ids" in obj["data"]
assert "row_ids" in obj["data"]
assert "values" in obj["data"]
except AssertionError:
return False
return True
def _get_categories(
ids,
matrix_ref,
attributemapping_ref=None,
mapping=None,
whitelist=(),
clustergrammer=False,
):
"""Creates the correct kind of multi-factor index for clustergrammer display"""
if not attributemapping_ref or whitelist is None:
return ids
cat_list = []
ws = clients.get("workspace")
attribute_data = ws.get_objects2(
{"objects": [{"ref": matrix_ref + ";" + attributemapping_ref}]}
)["data"][0]["data"]
if not mapping:
mapping = {x: x for x in ids}
whitelist = set(whitelist)
for _id in ids:
try:
attribute_values = attribute_data["instances"][mapping[_id]]
except KeyError:
if _id not in mapping:
raise ValueError(
"Row or column id {} is not in the provided mapping".format(_id)
)
raise ValueError(
"AttributeMapping {} has no attribute {} which corresponds to row or "
"column id {} in the provided object.".format(
attributemapping_ref, mapping[_id], _id
)
)
cats = [_id]
for i, val in enumerate(attribute_values):
cat_name = attribute_data["attributes"][i]["attribute"]
if whitelist and cat_name not in whitelist:
continue
if clustergrammer:
cats.append("{}: {}".format(cat_name, val))
else:
cats.append(val)
cat_list.append(tuple(cats))
if clustergrammer:
return cat_list
attribute_names = [
x["attribute"]
for x in attribute_data["attributes"]
if not whitelist or x["attribute"] in whitelist
]
return pd.MultiIndex.from_tuples(cat_list, names=["ID"] + attribute_names)
| mit |
akionakamura/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ilyes14/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
fabianp/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
arthurfait/HMM-3 | Gen_Synthetic_Data/work/preprocess.py | 1 | 5524 | #!/usr/bin/env python
import sys, os, cmd
import numpy as NUM
sys.path.append('../') #need to access GMMHMM directory and python libraries
from sklearn.decomposition import PCA, KernelPCA
from sklearn.decomposition import ProjectedGradientNMF
import GMMHMM.tr_obj as tr_obj
import GMMHMM.HMM_IO as HMM_IO
import GMMHMM.algo_HMM as algo_HMM
if len(sys.argv)<6:
print "usage :",sys.argv[0]," window size, train_dataset, test_dataset, profileset, pca_dataset output dir"
sys.exit(-1)
pset=open(sys.argv[2]).readlines()
win = int(sys.argv[1])
halwin=int(win/2)
# read all proteins from training set
trobjlist = []
pre_pca_lst = []
for protname in pset:
prot=open(sys.argv[4]+protname.strip()+".profile",'r').readlines()
seq=[]
true_path = []
seqtmp=[]
for line in prot:
v=line.split()
if (v[-1].lower() == 'f') or (v[-1].lower() == 'b'):
listain=[float(x) for x in v[:-1]]
slist=sum(listain)
if slist >0:
listain=[x/slist for x in listain]
else:
listain=[1.0/len(listain) for x in listain]
seqtmp.append(listain)
true_path.append(str(v[-1].lower())) #last element is the path label
#else:
# print "Training Prot: %s, label: %s"%(protname.strip(),v[-1].lower())
unif=[1.0/len(listain) for x in listain] #add uniformly distributed freq for extra half windows
for i in range(halwin): #insert the extra half windows at the termini
seqtmp.insert(0,unif)
seqtmp.append(unif)
for i in range(halwin,len(seqtmp)-halwin):
v=[]
for j in range(-halwin,halwin+1):
v+=seqtmp[i+j]
seq.append(v)
pre_pca_lst.append(v)
trobjlist.append(tr_obj.TR_OBJ(seq,labels=true_path,name=protname.strip()))
# transform all training set proteins using PCA, ensuring the maximum number of sample features is 200
#pca_trans = PCA(n_components='mle')
pca_trans = KernelPCA(n_components=200,kernel="rbf", gamma=10)
pca_lst = pca_trans.fit_transform(pre_pca_lst)
#if len(pca_trans.explained_variance_ratio_) > 200:
# pca_trans = PCA(n_components=200)
# pca_lst = pca_trans.fit_transform(pre_pca_lst)
#print "Variance ratio explained by each PCA component",pca_trans.explained_variance_ratio_
#print "Number of PCA components %s"%len(pca_trans.explained_variance_ratio_)
'''
# non-negative matrix factorization using projected gradient
nmf_trans = ProjectedGradientNMF(n_components=200)
nmf_lst = nmf_trans.fit_transform(pre_pca_lst)
pca_trans = nmf_trans
pca_lst = nmf_lst
# Frobenius norm of the matrix difference between the training data and the reconstructed
# data from the fit produced by the model. || X - WH ||_2
print "Reconstruction Error ",nmf_trans.reconstruction_err_
'''
# write newly PCA transformed input encodings to file, including original annotations
# the elements of pca_lst should be in the same positional ordering as trobjlist
cur_start = 0 #start index position for current protein sequence
for i in range(len(trobjlist)):
o = trobjlist[i]
cur_len = o.len
cur_seq_str = ""
cur_pca = pca_lst[cur_start:(cur_start+o.len)]
for j in range(o.len):
cur_seq_str += " ".join([str(e) for e in cur_pca[j]]) + " " + o.labels[j] + "\n"
fout = open(sys.argv[5] + o.name + ".profile",'w')
fout.write(cur_seq_str)
fout.close()
cur_start += o.len
# use the current PCA transformation to transform the test data
pset=open(sys.argv[3]).readlines()
win = int(sys.argv[1])
halwin=int(win/2)
# read all proteins from test set
testobjlist = []
pre_pca_lst = []
for protname in pset:
prot=open(sys.argv[4]+protname.strip()+".profile",'r').readlines()
seq=[]
true_path = []
seqtmp=[]
for line in prot:
v=line.split()
if (v[-1].lower() == 'f') or (v[-1].lower() == 'b'):
listain=[float(x) for x in v[:-1]]
slist=sum(listain)
if slist >0:
listain=[x/slist for x in listain]
else:
listain=[1.0/len(listain) for x in listain]
seqtmp.append(listain)
true_path.append(str(v[-1].lower())) #last element is the path label
#else:
# print "Testing Prot: %s, label: %s"%(protname.strip(),v[-1].lower())
unif=[1.0/len(listain) for x in listain] #add uniformly distributed freq for extra half windows
for i in range(halwin): #insert the extra half windows at the termini
seqtmp.insert(0,unif)
seqtmp.append(unif)
for i in range(halwin,len(seqtmp)-halwin):
v=[]
for j in range(-halwin,halwin+1):
v+=seqtmp[i+j]
seq.append(v)
pre_pca_lst.append(v)
testobjlist.append(tr_obj.TR_OBJ(seq,labels=true_path,name=protname.strip()))
# transform all testing set proteins using PCA
pca_lst = pca_trans.transform(pre_pca_lst)
# write newly PCA transformed input encodings to file, including original annotations
# the elements of pca_lst should be in the same positional ordering as trobjlist
cur_start = 0 #start index position for current protein sequence
for i in range(len(testobjlist)):
o = testobjlist[i]
cur_len = o.len
cur_seq_str = ""
cur_pca = pca_lst[cur_start:(cur_start+o.len)]
for j in range(o.len):
cur_seq_str += " ".join([str(e) for e in cur_pca[j]]) + " " +o.labels[j] + "\n"
fout = open(sys.argv[5] + o.name + ".profile",'w')
fout.write(cur_seq_str)
fout.close()
cur_start += o.len | gpl-3.0 |
ebertti/nospam | exemplo/document_classification_20newsgroups.py | 1 | 9832 | # coding=utf-8
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import pylab as pl
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
from classificacao import Classificacao
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
"""
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
"""
data_train, data_test = Classificacao().rodar('it')
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
print("done in %fs" % (time() - t0))
print()
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = np.asarray(vectorizer.get_feature_names())
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.f1_score(y_test, pred)
print("f1-score: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
class L1LinearSVC(LinearSVC):
def fit(self, X, y):
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
self.transformer_ = LinearSVC(penalty="l1",
dual=False, tol=1e-3)
X = self.transformer_.fit_transform(X, y)
return LinearSVC.fit(self, X, y)
def predict(self, X):
X = self.transformer_.transform(X)
return LinearSVC.predict(self, X)
print('=' * 80)
print("LinearSVC with L1-based feature selection")
results.append(benchmark(L1LinearSVC()))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
pl.figure(figsize=(12,8))
pl.title("Score")
pl.barh(indices, score, .2, label="score", color='r')
pl.barh(indices + .3, training_time, .2, label="training time", color='g')
pl.barh(indices + .6, test_time, .2, label="test time", color='b')
pl.yticks(())
pl.legend(loc='best')
pl.subplots_adjust(left=.25)
pl.subplots_adjust(top=.95)
pl.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
pl.text(-.3, i, c)
pl.show() | mit |