repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
maps16/FComputacional1 | Actividad10/Codigo/Animacion_Pendulo.py | 1 | 4663 |
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
class DoublePendulum:
"""Double Pendulum Class
init_state is [theta1, omega1, theta2, omega2] in degrees,
where theta1, omega1 is the angular position and velocity of the first
pendulum arm, and theta2, omega2 is that of the second pendulum arm
"""
def __init__(self,
init_state = [120, 0, 0, 0],
L1=1.0, # length of pendulum 1 in m
L2=1.0, # length of pendulum 2 in m
M1=1.0, # mass of pendulum 1 in kg
M2=1.0, # mass of pendulum 2 in kg
G=9.8, # acceleration due to gravity, in m/s^2
origin=(0, 0)):
self.init_state = np.asarray(init_state, dtype='float')
self.params = (L1, L2, M1, M2, G)
self.origin = origin
self.time_elapsed = 0
self.state = self.init_state * np.pi / 180.
def position(self):
"""compute the current x,y positions of the pendulum arms"""
(L1, L2, M1, M2, G) = self.params
x = np.cumsum([self.origin[0],
L1 * sin(self.state[0]),
L2 * sin(self.state[2])])
y = np.cumsum([self.origin[1],
-L1 * cos(self.state[0]),
-L2 * cos(self.state[2])])
return (x, y)
def energy(self):
"""compute the energy of the current state"""
(L1, L2, M1, M2, G) = self.params
x = np.cumsum([L1 * sin(self.state[0]),
L2 * sin(self.state[2])])
y = np.cumsum([-L1 * cos(self.state[0]),
-L2 * cos(self.state[2])])
vx = np.cumsum([L1 * self.state[1] * cos(self.state[0]),
L2 * self.state[3] * cos(self.state[2])])
vy = np.cumsum([L1 * self.state[1] * sin(self.state[0]),
L2 * self.state[3] * sin(self.state[2])])
U = G * (M1 * y[0] + M2 * y[1])
K = 0.5 * (M1 * np.dot(vx, vx) + M2 * np.dot(vy, vy))
return U + K
def dstate_dt(self, state, t):
"""compute the derivative of the given state"""
(L1, L2, M1, M2, G) = self.params
dydx = np.zeros_like(state)
dydx[0] = state[1]
dydx[2] = state[3]
cos_delta = cos(state[2] - state[0])
sin_delta = sin(state[2] - state[0])
den1 = (M1 + M2) * L1 - M2 * L1 * cos_delta * cos_delta
dydx[1] = (M2 * L1 * state[1] * state[1] * sin_delta * cos_delta
+ M2 * G * sin(state[2]) * cos_delta
+ M2 * L2 * state[3] * state[3] * sin_delta
- (M1 + M2) * G * sin(state[0])) / den1
den2 = (L2 / L1) * den1
dydx[3] = (-M2 * L2 * state[3] * state[3] * sin_delta * cos_delta
+ (M1 + M2) * G * sin(state[0]) * cos_delta
- (M1 + M2) * L1 * state[1] * state[1] * sin_delta
- (M1 + M2) * G * sin(state[2])) / den2
return dydx
def step(self, dt):
"""execute one time step of length dt and update state"""
self.state = integrate.odeint(self.dstate_dt, self.state, [0, dt])[1]
self.time_elapsed += dt
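# Hedged usage sketch (added, not part of the original script): exercise the
# DoublePendulum API directly. Total energy should be nearly conserved over a
# small integration step, which is a quick sanity check of dstate_dt().
_demo = DoublePendulum([90.0, 0.0, 90.0, 0.0])
_e0 = _demo.energy()
_demo.step(0.001)
assert abs(_demo.energy() - _e0) < 1e-3, "energy drift should be negligible"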
#------------------------------------------------------------
# set up initial state and global variables
pendulum = DoublePendulum([120.0, 0.0, 0.0, 0.0])
dt = 1./60. # 60 fps
#------------------------------------------------------------
# set up figure and animation
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(-2, 2), ylim=(-2, 2))
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
energy_text = ax.text(0.02, 0.90, '', transform=ax.transAxes)
def init():
#initialize animation
line.set_data([], [])
time_text.set_text('')
energy_text.set_text('')
return line, time_text, energy_text
def animate(i):
#perform animation step
global pendulum, dt
pendulum.step(dt)
line.set_data(*pendulum.position())
time_text.set_text('time = %.1f' % pendulum.time_elapsed)
#energy_text.set_text('energy = %.3f J' % pendulum.energy())
return line, time_text, energy_text
# choose the interval based on dt and the time to animate one step
from time import time
t0 = time()
animate(0)
t1 = time()
interval = 1000 * dt - (t1 - t0)
ani = animation.FuncAnimation(fig, animate, frames=300,
interval=interval, blit=True, init_func=init)
plt.show()
| mit |
fabianp/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 |
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/decomposition/plot_ica_vs_pca.py | 59 | 3329 |
"""
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of two
different component analysis techniques: :ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t variables with a low number of degrees of freedom
(top left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
# #############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
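# Hedged illustration (added): numerically check the claim in the docstring
# above. PCA components are orthogonal in the raw feature space, while the ICA
# mixing directions generally are not (they are orthogonal only after whitening).
_pca_dot = abs(np.dot(pca.components_[0], pca.components_[1]))
_ica_dirs = ica.mixing_ / np.linalg.norm(ica.mixing_, axis=0)
_ica_dot = abs(np.dot(_ica_dirs[:, 0], _ica_dirs[:, 1]))
print("|<pca_1, pca_2>| = %.3f   |<ica_1, ica_2>| = %.3f" % (_pca_dot, _ica_dot))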
# #############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| gpl-3.0 |
equialgo/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 28 | 3792 |
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Got X with X.ndim=1',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Got X with X.ndim=1',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
tdhopper/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing step that allows
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative, given
the remaining uncertainty in the prediction. The red and blue lines correspond
to the 95% confidence interval on the prediction of the zero level set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
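# Hedged note (added): under the Gaussian predictive distribution
# N(y_pred, sigma**2), the probability that the latent function is negative is
#     P[G(x) <= 0] = PHI(-y_pred / sigma),
# which is exactly the field imaged and contoured below; k = PHIinv(0.975) is
# the corresponding 97.5% standard-normal quantile (half-width of a 95% interval).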
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
lin-credible/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 |
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we vary the model complexity through the choice of
relevant model parameters, and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
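# Note (added): benchmark_influence() expects `conf` to carry the keys used
# above: 'estimator', 'tuned_params', 'changing_param', 'changing_param_values',
# 'data', 'postfit_hook', 'complexity_computer',
# 'prediction_performance_computer', 'prediction_performance_label' and
# 'n_samples'; see the `configurations` list below for concrete values.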
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
Knifa/Glasgow-Baxter | src/glasgow_baxter/scripts/block_stacker/understanding.py | 1 | 5987 |
#!/usr/bin/env python
import rospy
from glasgow_baxter_helpers import BaxterNode
from glasgow_baxter.msg import DetectedSquares, TrackedSquares
from square import Square, TrackedSquare
import numpy as np
from scipy.spatial import distance
from sklearn.cluster import MeanShift, AffinityPropagation, DBSCAN, estimate_bandwidth
import random
import itertools
import collections
####################################################################################################
class UnderstandingNode(BaxterNode):
def __init__(self):
super(UnderstandingNode, self).__init__()
self._squares_sub = rospy.Subscriber(
'/squares',
DetectedSquares,
self.on_squaremsg_received)
self._squares_pub = rospy.Publisher(
'/tracked_squares',
TrackedSquares,
tcp_nodelay=True)
self._prev_squares = collections.deque(maxlen=20)
self._tracked_squares = None  # populated lazily by _track_squares()
############################################################################
def start(self):
super(UnderstandingNode, self).start(spin=True)
############################################################################
def on_squaremsg_received(self, msg):
detected_squares = []
for square_msg in msg.squares:
detected_squares.append(TrackedSquare.from_msg(square_msg))
self._prev_squares.append(detected_squares)
all_squares = list(itertools.chain.from_iterable(self._prev_squares))
square_centers = [list(s.center) + [s.hue] for s in all_squares]
data = np.array(square_centers)
ms = DBSCAN(eps=64, min_samples=3)
ms.fit(data)
labels = ms.labels_
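# Note (added): DBSCAN labels noise points as -1, which is why squares with a
# negative label are skipped below instead of being assigned a tracking colour.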
ts_msg = TrackedSquares()
for i, s in enumerate(all_squares):
label = np.int0(labels[i])
if label < 0:
continue
s.tracking_colour = TrackedSquare.TRACKING_COLOURS[label % len(TrackedSquare.TRACKING_COLOURS)]
s.tracking_detected = True
ts_msg.squares.append(s.to_msg())
self._squares_pub.publish(ts_msg)
############################################################################
def _track_squares(self, detected_squares):
if self._tracked_squares is None:
self._tracked_squares = dict(map(lambda s: (s.tracking_id, s), detected_squares))
return
min_squares = self._match_min_squares(detected_squares, self._tracked_squares.values())
# Update tracking with the new square.
for ds, ts in min_squares.items():
self._tracked_squares[ts.tracking_id] = ds
ds.tracking_detected = True
ds.tracking_id = ts.tracking_id
ds.tracking_colour = ts.tracking_colour
# Mark any untracked squares this frame as inactive.
ts_msg = TrackedSquares()
for ts in self._tracked_squares.values():
if not ts in min_squares.keys():
ts.tracking_detected = False
self._publish_tracked_squares()
############################################################################
def _build_distance_matrix(self, detected_squares, tracked_squares):
# Calculate distances between tracked squares and new squares.
distance_matrix = {}
for ds in detected_squares:
distance_matrix[ds] = {}
for ts in tracked_squares:
distance_matrix[ds][ts] = distance.minkowski(ds.center, ts.center, 128)
return distance_matrix
def _sort_squares_by_distance_matrix(self, distance_matrix):
# Calculate distances between tracked squares and new squares.
sorted_squares = {}
for ds in distance_matrix.keys():
sorted_squares[ds] = collections.deque(
sorted(distance_matrix[ds].keys(), key=lambda ts: distance_matrix[ds][ts]))
return sorted_squares
def _match_min_squares(self, detected_squares, tracked_squares):
distance_matrix = self._build_distance_matrix(detected_squares,
self._tracked_squares.values())
sorted_squares = self._sort_squares_by_distance_matrix(distance_matrix)
min_squares_ts_to_ds = {}
min_squares_ds_to_ts = {}
need_matched = collections.deque(distance_matrix.keys())
while len(need_matched) > 0:
ds = need_matched.popleft()
# Leave it unmatched if there's nothing else.
if not len(sorted_squares[ds]) > 0:
continue
min_ts = sorted_squares[ds].popleft()
if not min_ts in min_squares_ts_to_ds:
# Tracked square is unmatched, so match it right away.
min_squares_ds_to_ts[ds] = min_ts
min_squares_ts_to_ds[min_ts] = ds
else:
# Closest tracked square has already been tracked.
ds2 = min_squares_ts_to_ds[min_ts]
# Check which one is closest.
if distance_matrix[ds][min_ts] < distance_matrix[ds2][min_ts]:
# This one is closest, so remap.
min_squares_ds_to_ts[ds] = min_ts
min_squares_ts_to_ds[min_ts] = ds
del min_squares_ds_to_ts[ds2]
need_matched.append(ds2)
else:
# Otherwise, try again later.
need_matched.append(ds)
return min_squares_ds_to_ts
def _publish_tracked_squares(self):
# Output all detected squares.
ts_msg = TrackedSquares()
for s in self._tracked_squares.values():
ts_msg.squares.append(s.to_msg())
self._squares_pub.publish(ts_msg)
####################################################################################################
def main():
rospy.init_node('understanding', anonymous=True)
node = UnderstandingNode()
node.start()
if __name__ == '__main__':
main()
| gpl-2.0 |
lqhuang/SAXS-tools | scripts/animate_data_frames.py | 1 | 8255 |
from __future__ import print_function, division
import os
import sys
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from PIL import Image
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
from RAW import RAWSimulator
def get_boxcenter(array_shape, center, radius=100):
box_center = (np.minimum(curr_center, radius, dtype=int)
for curr_center in zip(center))
return np.vstack(box_center).flatten()
def get_boxsize(array_shape, center, radius=100):
if len(center) != len(array_shape):
raise ValueError(
'Length of center must be the same as the dimension of array')
size = (np.minimum(curr_center + radius, max_len, dtype=int) -
np.maximum(curr_center - radius, 0, dtype=int)
for curr_center, max_len in zip(center, array_shape))
return tuple(size)
def boxslice(array, center, radius=100):
"""Slice a box with given radius from ndim array and return a view.
Please notice the size of return is uncertain, which depends on boundary.
Parameters
----------
array : array_like
Input array.
center : tuple of int
Center in array to boxing. For 2D array, it's (row_center, col_center).
Length must be the same with dimension of array.
Returns
-------
out : array_like
A view of `array` with given box range.
"""
if len(center) != array.ndim:
raise ValueError(
'Length of center must be the same as the dimension of array')
slicer = [
slice(
np.maximum(curr_center - radius, 0, dtype=int),
np.minimum(curr_center + radius, max_len, dtype=int),
) for curr_center, max_len in zip(center, array.shape)
]
return array[slicer]
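# Hedged usage sketch (added, hypothetical values): with a 100x100 array,
# boxslice(a, (50, 60), radius=4) returns an 8x8 view (rows 46:54, cols 56:64),
# while a centre near the edge, e.g. boxslice(a, (2, 2), radius=4), is clipped
# to a 6x6 view (rows 0:6, cols 0:6), as the docstring warns.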
def subtract_radial_average(img, center, mask=None):
"""Let image subtract its radial average matrix.
Parameters
----------
img : numpy.ndarray
2D matrix of input image
center : tuple of int
center of image
mask : numpy.ndarray, optional
mask for image. 1 means valid area, 0 means masked area.
(the default is None, which is no mask.)
Returns
-------
numpy.ndarray
return residual image.
"""
assert img.ndim == 2, 'Wrong dimension for image.'
assert len(center) == 2, 'Wrong dimension for center.'
if mask is not None:
masked_img = img * mask
else:
masked_img = img
center = np.round(center)
meshgrids = np.indices(img.shape) # return (xx, yy)
# eq: r = sqrt( (x - x_center)**2 + (y - y_center)**2 + (z - z_center)**2 )
r = np.sqrt(sum(((grid - c)**2 for grid, c in zip(meshgrids, center))))
r = np.round(r).astype(np.int)
total_bin = np.bincount(r.ravel(), masked_img.ravel())
nr = np.bincount(r.ravel()) # count for each r
if mask is not None:
r_mask = np.zeros(r.shape)
r_mask[np.where(mask == 0.0)] = 1
nr_mask = np.bincount(r.ravel(), r_mask.ravel())
nr = nr - nr_mask
radialprofile = np.zeros_like(nr)
# r_pixel = np.unique(r.ravel()) # sorted
nomaskr = np.where(nr > 0)
radialprofile[nomaskr] = total_bin[nomaskr] / nr[nomaskr]
if mask is None:
residual_img = masked_img - radialprofile[r] # subtract mean matrix
else:
residual_img = masked_img - radialprofile[r] * mask
return residual_img
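# Hedged note (added): because the profile is accumulated on integer-rounded
# radii (np.bincount over r), a perfectly radially symmetric image does not
# give an exactly zero residual, only a small one bounded by the variation of
# the profile within each 1-pixel radial bin.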
def animate_frames(framefiles,
mask,
image_dim,
center,
radius=150,
subtract_average=False,
vmin=0,
vmax=400,
show=False,
save_to_video=True,
animation_name=None):
"""animate sas data frames
"""
boxshape = get_boxsize(image_dim, center, radius)
# stack_shape: (num_images, row, col)
stack_shape = [len(framefiles)] + list(boxshape)
image_stack = np.zeros(stack_shape, dtype=float)
boxed_mask = boxslice(mask, center, radius)
box_center = get_boxcenter(image_dim, center, radius)
for i, filename in enumerate(framefiles):
with Image.open(filename) as tiff:
boxed_image = boxslice(
np.fliplr(np.asarray(tiff, dtype=float)), center,
radius) * boxed_mask
if subtract_average:
boxed_image = subtract_radial_average(boxed_image, box_center,
boxed_mask)
image_stack[i, :, :] = boxed_image
fig, ax = plt.subplots()
ax_divider = make_axes_locatable(ax)
cax = ax_divider.append_axes('right', size='7%', pad='2%')
if subtract_average:
im = ax.imshow(image_stack[0], cmap='jet', animated=True)
else:
im = ax.imshow(
image_stack[0], cmap='jet', vmin=vmin, vmax=vmax, animated=True)
title = ax.set_title('current frame: {}'.format(str(1).zfill(3)))
cb = fig.colorbar(im, cax=cax)
fig.tight_layout()
def update_im(fr):
im.set_data(image_stack[fr])
cb.set_array(image_stack[fr])
cb.autoscale()
title.set_text('current frame: {}'.format(str(fr + 1).zfill(3)))
cb.draw_all()
fig.canvas.draw_idle()
# return a sequence of artists, not a single artist
return im,
# call the animator. blit=True means only re-draw the parts that have changed.
# interval: Delay between frames in milliseconds. Defaults to 200.
# Additional arguments to pass to each call to func
# http://matplotlib.org/api/_as_gen/matplotlib.animation.FuncAnimation.html
# anim = animation.ArtistAnimation(fig, ims, interval=100, blit=True)
frames_iter = range(len(framefiles))
anim = animation.FuncAnimation(
fig, update_im, frames_iter, interval=500, blit=True, repeat=False)
if save_to_video:
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
if animation_name is None:
animation_name = 'animation.mp4'
elif not animation_name.endswith('.mp4'):
animation_name += '.mp4'
anim.save(animation_name, fps=4, extra_args=['-vcodec', 'libx264'])
if show:
plt.show()
def gen_animation(raw_settings, image_filenames, animation_name='./animation'):
x_center = int(raw_settings.get('Xcenter'))
y_center = int(raw_settings.get('Ycenter'))
image_dim = tuple(int(v) for v in raw_settings.get('MaskDimension'))
col_center = x_center
row_center = image_dim[0] - y_center
center = [row_center, col_center]
mask = raw_settings.get('BeamStopMask')
if mask is None:
mask = raw_settings.get('Masks')['BeamStopMask']
if not image_filenames:
raise FileNotFoundError('No image files found.')
animate_frames(
image_filenames,
mask,
image_dim,
center,
vmax=70,
save_to_video=True,
# show=True,
animation_name=animation_name)
def main():
image_directory = sys.argv[1]
raw_cfg_path = sys.argv[2]
raw_simulator = RAWSimulator(raw_cfg_path)
raw_settings = raw_simulator.get_raw_settings()
image_format = '.tif'
image_filenames = sorted(
glob.glob(os.path.join(image_directory, '*' + image_format)))
for filename in reversed(image_filenames):
if 'buffer' in filename:
image_filenames.remove(filename)
root = os.path.dirname(image_directory)
root_name = os.path.basename(root)
animation_name = os.path.join(root, 'Figures',
root_name + '_dynamic_video')
os.makedirs(os.path.join(root, 'Figures'), exist_ok=True)
gen_animation(raw_settings, image_filenames, animation_name)
if __name__ == '__main__':
main()
| gpl-3.0 |
vlad17/spark-sklearn | python/spark_sklearn/tests/test_grid_search_1.py | 1 | 2453 |
import unittest
import sklearn.grid_search
from spark_sklearn import GridSearchCV
from spark_sklearn.test_utils import fixtureReuseSparkSession
# Overwrite the sklearn GridSearch in this suite so that we can run the same tests with the same
# parameters.
@fixtureReuseSparkSession
class AllTests(unittest.TestCase):
# After testing, make sure to revert sklearn to normal (see _add_to_module())
@classmethod
def tearDownClass(cls):
super(AllTests, cls).tearDownClass()
# Restore sklearn module to the original state after done testing this fixture.
sklearn.grid_search.GridSearchCV = sklearn.grid_search.GridSearchCV_original
del sklearn.grid_search.GridSearchCV_original
class SPGridSearchWrapper(GridSearchCV):
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(SPGridSearchWrapper, self).__init__(AllTests.spark.sparkContext, estimator, param_grid,
scoring, fit_params, n_jobs, iid, refit, cv,
verbose, pre_dispatch, error_score)
# These methods do not raise ValueError but something different
_blacklist = set(['test_pickle',
'test_grid_search_precomputed_kernel_error_nonsquare',
'test_grid_search_precomputed_kernel_error_kernel_function',
'test_grid_search_precomputed_kernel',
'test_grid_search_failing_classifier_raise',
'test_grid_search_failing_classifier']) # This one we should investigate
def _create_method(method):
def do_test_expected(*kwargs):
method()
return do_test_expected
def _add_to_module():
SKGridSearchCV = sklearn.grid_search.GridSearchCV
sklearn.grid_search.GridSearchCV = SPGridSearchWrapper
sklearn.grid_search.GridSearchCV_original = SKGridSearchCV
from sklearn.tests import test_grid_search
all_methods = [(mname, method) for (mname, method) in test_grid_search.__dict__.items()
if mname.startswith("test_") and mname not in _blacklist]
for name, method in all_methods:
method_for_test = _create_method(method)
method_for_test.__name__ = name
setattr (AllTests, method.__name__, method_for_test)
_add_to_module()
| apache-2.0 |
Lawrence-Liu/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 |
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
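# Note (added): the "balanced" heuristic is n_samples / (n_classes * bincount):
# here y has counts {-2: 3, -1: 1, 0: 2}, so 6 / (3 * [3, 1, 2]) = [2/3, 2, 1],
# matching the assertion above.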
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
wavelets/zipline | zipline/data/benchmarks.py | 33 | 4096 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from datetime import datetime
import csv
from functools import partial
import requests
import pandas as pd
from six import iteritems
from . loader_utils import (
date_conversion,
source_to_records,
Mapping
)
DailyReturn = collections.namedtuple('DailyReturn', ['date', 'returns'])
class BenchmarkDataNotFoundError(Exception):
pass
_BENCHMARK_MAPPING = {
# Need to add 'symbol'
'volume': (int, 'Volume'),
'open': (float, 'Open'),
'close': (float, 'Close'),
'high': (float, 'High'),
'low': (float, 'Low'),
'adj_close': (float, 'Adj Close'),
'date': (partial(date_conversion, date_pattern='%Y-%m-%d'), 'Date')
}
def benchmark_mappings():
return {key: Mapping(*value)
for key, value
in iteritems(_BENCHMARK_MAPPING)}
def get_raw_benchmark_data(start_date, end_date, symbol):
# create benchmark files
# ^GSPC 19500103
params = collections.OrderedDict((
('s', symbol),
# start_date month, zero indexed
('a', start_date.month - 1),
# start_date day
('b', start_date.day),
# start_date year
('c', start_date.year),
# end_date month, zero indexed
('d', end_date.month - 1),
# end_date day str(int(todate[6:8])) #day
('e', end_date.day),
# end_date year str(int(todate[0:4]))
('f', end_date.year),
# daily frequency
('g', 'd'),
))
res = requests.get('http://ichart.finance.yahoo.com/table.csv',
params=params, stream=True)
if not res.ok:
raise BenchmarkDataNotFoundError("""
No benchmark data found for date range.
start_date={start_date}, end_date={end_date}, url={url}""".strip().
format(start_date=start_date,
end_date=end_date,
url=res.url))
return csv.DictReader(res.text.splitlines())
def get_benchmark_data(symbol, start_date=None, end_date=None):
"""
Benchmarks from Yahoo.
"""
if start_date is None:
start_date = datetime(year=1950, month=1, day=3)
if end_date is None:
end_date = datetime.utcnow()
raw_benchmark_data = get_raw_benchmark_data(start_date, end_date, symbol)
mappings = benchmark_mappings()
return source_to_records(mappings, raw_benchmark_data)
def get_benchmark_returns(symbol, start_date=None, end_date=None):
"""
Returns a list of return percentages in chronological order.
"""
if start_date is None:
start_date = datetime(year=1950, month=1, day=3)
if end_date is None:
end_date = datetime.utcnow()
# Get the benchmark data and convert it to a list in chronological order.
data_points = list(get_benchmark_data(symbol, start_date, end_date))
data_points.reverse()
# Calculate the return percentages.
benchmark_returns = []
for i, data_point in enumerate(data_points):
if i == 0:
curr_open = data_points[i]['open']
returns = (data_points[i]['close'] - curr_open) / curr_open
else:
prev_close = data_points[i - 1]['close']
returns = (data_point['close'] - prev_close) / prev_close
date = pd.tseries.tools.normalize_date(data_point['date'])
daily_return = DailyReturn(date=date, returns=returns)
benchmark_returns.append(daily_return)
return benchmark_returns
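# Hedged worked example (added, hypothetical numbers): with closing prices
# [100.0, 102.0, 101.0] (and open == 100.0 on the first day), the loop above
# yields returns [0.0, 0.02, -0.0098...]: day 0 uses (close - open) / open and
# every later day uses (close - prev_close) / prev_close.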
| apache-2.0 |
mxjl620/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 |
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal here because every
# per-element error is either 0 or 1, so |e| == e**2.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal here because every
# per-element error is either 0 or 1, so |e| == e**2.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
cl4rke/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 |
"""
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
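# Hedged usage sketch (not part of the original tests): chi2 can also be
# called directly to inspect per-feature scores and p-values; SelectKBest
# simply keeps the k features with the largest chi2 scores. The helper name
# is illustrative only.
def demo_chi2_scores():
    scores, pvalues = chi2(X, y)
    selector = mkchi2(k=2).fit(X, y)
    top_two = sorted(np.argsort(scores)[-2:])
    assert_equal(top_two, sorted(selector.get_support(indices=True)))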
|
bsd-3-clause
|
YinongLong/scikit-learn
|
sklearn/mixture/tests/test_gmm.py
|
5
|
20902
|
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_greater, assert_raise_message,
assert_warns_message, ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return  # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices.
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
# Test that multiple inits do not do much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to raise a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and a fixed
mean. This function ensures that some later optimization will not introduce
the problem again. (A small numerical sketch of the two formulas follows this
function.)
"""
rng = np.random.RandomState(1)
# we build a dataset with two 2-D components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
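# Hedged numerical sketch (not part of the original tests): the two covariance
# formulas quoted in the docstring of check_positive_definite_covars are equal
# in exact arithmetic, but the uncentered form loses precision through
# cancellation once the mean is large; the centered form is the safe one.
def demo_covariance_formulas():
    rng_demo = np.random.RandomState(0)
    x = rng_demo.randn(1000, 2) + 1e4  # a large mean amplifies cancellation
    mu = x.mean(axis=0)
    c_uncentered = (x[:, :, None] * x[:, None, :]).mean(axis=0) - np.outer(mu, mu)
    c_centered = np.cov(x, rowvar=0, bias=1)
    # both approximate the identity covariance; they agree only up to round-off
    assert_array_almost_equal(c_uncentered, c_centered, decimal=5)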
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
daniorerio/trackpy
|
trackpy/framewise_data.py
|
1
|
10185
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from abc import ABCMeta, abstractmethod, abstractproperty
import warnings
import pandas as pd
from .utils import print_update
class FramewiseData(object):
"Abstract base class defining a data container with framewise access."
__metaclass__ = ABCMeta
@abstractmethod
def put(self, df):
pass
@abstractmethod
def get(self, frame_no):
pass
@abstractproperty
def frames(self):
pass
@abstractmethod
def close(self):
pass
@abstractproperty
def t_column(self):
pass
def __getitem__(self, frame_no):
return self.get(frame_no)
def __len__(self):
return len(self.frames)
def dump(self, N=None):
"""Return data from all, or the first N, frames in a single DataFrame
Parameters
----------
N : integer
optional; if None, return all frames
Returns
-------
DataFrame
"""
if N is None:
return pd.concat(iter(self))
else:
i = iter(self)
return pd.concat((next(i) for _ in range(N)))
@property
def max_frame(self):
return max(self.frames)
def _validate(self, df):
if self.t_column not in df.columns:
raise ValueError("Cannot write frame without a column "
"called {0}".format(self.t_column))
if df[self.t_column].nunique() != 1:
raise ValueError("Found multiple values for 'frame'. "
"Write one frame at a time.")
def __iter__(self):
return self._build_generator()
def _build_generator(self):
for frame_no in self.frames:
yield self.get(frame_no)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
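# Hedged sketch (not part of trackpy): a minimal in-memory implementation of
# the FramewiseData contract above, included only to illustrate the abstract
# API; the class name is ours.
class DictFramewiseData(FramewiseData):
    "Toy FramewiseData backed by a plain dict keyed by frame number."
    def __init__(self, t_column='frame'):
        self._t_column = t_column
        self._data = {}
    @property
    def t_column(self):
        return self._t_column
    @property
    def frames(self):
        return sorted(self._data.keys())
    def put(self, df):
        self._validate(df)
        self._data[df[self.t_column].iloc[0]] = df
    def get(self, frame_no):
        return self._data[frame_no]
    def close(self):
        self._data.clear()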
KEY_PREFIX = 'Frame_'
len_key_prefix = len(KEY_PREFIX)
def code_key(frame_no):
"Turn the frame_no into a 'natural name' string idiomatic of HDFStore"
key = '{0}{1}'.format(KEY_PREFIX, frame_no)
return key
def decode_key(key):
frame_no = int(key[len_key_prefix:])
return frame_no
class PandasHDFStore(FramewiseData):
"""An interface to an HDF5 file with framewise access, using pandas.
Save each frame's data to a node in a pandas HDFStore.
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, mode='a', t_column='frame', **kwargs):
self.filename = os.path.abspath(filename)
self._t_column = t_column
self.store = pd.HDFStore(self.filename, mode, **kwargs)
@property
def t_column(self):
return self._t_column
@property
def max_frame(self):
return max(self.frames)
def put(self, df):
if len(df) == 0:
warnings.warn('An empty DataFrame was passed to put(). Continuing.')
return
frame_no = df[self.t_column].values[0] # validated to be all the same
key = code_key(frame_no)
# Store data as tabular instead of fixed-format.
# Make sure to remove any preexisting data, so we don't really 'append'.
try:
self.store.remove(key)
except KeyError:
pass
self.store.put(key, df, format='table')
def get(self, frame_no):
key = code_key(frame_no)
frame = self.store.get(key)
return frame
@property
def frames(self):
"""Returns sorted list of integer frame numbers in file"""
return self._get_frame_nos()
def _get_frame_nos(self):
"""Returns sorted list of integer frame numbers in file"""
# Pandas' store.keys() scans the entire file looking for stored Pandas
# structures. This is very slow for large numbers of frames.
# Instead, scan the root level of the file for nodes with names
# matching our scheme; we know they are DataFrames.
r = [decode_key(key) for key in self.store.root._v_children.keys() if
key.startswith(KEY_PREFIX)]
r.sort()
return r
def close(self):
self.store.close()
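# Hedged usage sketch (not part of trackpy): writing a few frames and reading
# them back with PandasHDFStore; the file name is illustrative only.
def demo_pandas_hdfstore(filename='demo_frames.h5'):
    with PandasHDFStore(filename) as s:
        for frame_no in range(3):
            s.put(pd.DataFrame({'x': [1.0, 2.0], 'y': [3.0, 4.0],
                                'frame': frame_no}))
        print(s.frames)       # -> [0, 1, 2]
        print(len(s.get(1)))  # -> 2
        print(len(s.dump()))  # -> 6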
class PandasHDFStoreBig(PandasHDFStore):
"""Like PandasHDFStore, but keeps a cache of frame numbers.
This can give a large performance boost when a file contains thousands
of frames.
If a file was made in PandasHDFStore, opening it with this class
and then closing it will add a cache (if mode != 'r').
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, mode='a', t_column='frame', **kwargs):
self._CACHE_NAME = '_Frames_Cache'
self._frames_cache = None
self._cache_dirty = False # Whether _frames_cache needs to be written out
super(PandasHDFStoreBig, self).__init__(filename, mode, t_column,
**kwargs)
@property
def frames(self):
# Hit memory cache, then disk cache
if self._frames_cache is not None:
return self._frames_cache
else:
try:
self._frames_cache = list(self.store[self._CACHE_NAME].index.values)
self._cache_dirty = False
except KeyError:
self._frames_cache = self._get_frame_nos()
self._cache_dirty = True # In memory, but not in file
return self._frames_cache
def put(self, df):
self._invalidate_cache()
super(PandasHDFStoreBig, self).put(df)
def rebuild_cache(self):
"""Delete cache on disk and rebuild it."""
self._invalidate_cache()
_ = self.frames # Compute cache
self._flush_cache()
def _invalidate_cache(self):
self._frames_cache = None
try:
del self.store[self._CACHE_NAME]
except KeyError: pass
def _flush_cache(self):
"""Writes frame cache if dirty and file is writable."""
if (self._frames_cache is not None and self._cache_dirty
and self.store.root._v_file._iswritable()):
self.store[self._CACHE_NAME] = pd.DataFrame({'dummy': 1},
index=self._frames_cache)
self._cache_dirty = False
def close(self):
"""Updates cache, writes if necessary, then closes file."""
if self.store.root._v_file._iswritable():
_ = self.frames # Compute cache
self._flush_cache()
super(PandasHDFStoreBig, self).close()
class PandasHDFStoreSingleNode(FramewiseData):
"""An interface to an HDF5 file with framewise access,
using pandas, that is faster for cross-frame queries.
This implementation is more complex than PandasHDFStore,
but it simplifies (speeds up?) cross-frame queries,
like queries for a single probe's entire trajectory.
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, key='FrameData', mode='a', t_column='frame',
use_tabular_copy=False, **kwargs):
self.filename = os.path.abspath(filename)
self.key = key
self._t_column = t_column
self.store = pd.HDFStore(self.filename, mode, **kwargs)
with pd.get_store(self.filename) as store:
try:
store[self.key]
except KeyError:
pass
else:
self._validate_node(use_tabular_copy)
@property
def t_column(self):
return self._t_column
def put(self, df):
if len(df) == 0:
warnings.warn('An empty DataFrame was passed to put(). Continuing.')
return
self._validate(df)
self.store.append(self.key, df, data_columns=True)
def get(self, frame_no):
frame = self.store.select(self.key, '{0} == {1}'.format(
self._t_column, frame_no))
return frame
def dump(self, N=None):
"""Return data from all, or the first N, frames in a single DataFrame
Parameters
----------
N : integer
optional; if None, return all frames
Returns
-------
DataFrame
"""
if N is None:
return self.store.select(self.key)
else:
Nth_frame = self.frames[N - 1]
return self.store.select(self.key, '{0} <= {1}'.format(
self._t_column, Nth_frame))
def close(self):
self.store.close()
def __del__(self):
if hasattr(self, 'store'):
self.close()
@property
def frames(self):
"""Returns sorted list of integer frame numbers in file"""
# I assume one column can fit in memory, which is not ideal.
# Chunking does not seem to be implemented for select_column.
frame_nos = self.store.select_column(self.key, self.t_column).unique()
frame_nos.sort()
return frame_nos
def _validate_node(self, use_tabular_copy):
# The HDFStore might be non-tabular, which means we cannot select a
# subset, and this whole structure will not work.
# For convenience, this can rewrite the table into a tabular node.
if use_tabular_copy:
self.key = _make_tabular_copy(self.filename, self.key)
pandas_type = getattr(getattr(getattr(
self.store._handle.root, self.key, None), '_v_attrs', None),
'pandas_type', None)
if not pandas_type == 'frame_table':
raise ValueError("This node is not tabular. Call with "
"use_tabular_copy=True to proceed.")
def _make_tabular_copy(store, key):
"""Copy the contents nontabular node in a pandas HDFStore
into a tabular node"""
tabular_key = key + '/tabular'
print_update("Making a tabular copy of %s at %s" % (key, tabular_key))
store.append(tabular_key, store.get(key), data_columns=True)
return tabular_key
|
bsd-3-clause
|
CforED/Machine-Learning
|
examples/gaussian_process/plot_gpc_xor.py
|
104
|
2132
|
"""
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
KarchinLab/2020plus
|
src/utils/python/util.py
|
1
|
11712
|
import numpy as np
import pandas as pd
from src.utils.python.amino_acid import AminoAcid
from src.utils.python.nucleotide import Nucleotide
import sqlite3
import pandas.io.sql as psql
import logging
import os
import sys
import datetime
from functools import wraps
import warnings
try:
import ConfigParser
except Exception as e:
import configparser as ConfigParser
logger = logging.getLogger(__name__)
proj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
config_dir = os.path.join(proj_dir, 'config/')
onco_label, tsg_label, other_label = 1, 2, 0
class_to_label = {'oncogene': onco_label,
'tsg': tsg_label,
'other': other_label}
def get_input_config(section):
"""Returns the config object to input.cfg."""
cfg = ConfigParser.ConfigParser()
cfg.read(config_dir + 'input.cfg')
cfg_options = dict(cfg.items(section))
return cfg_options
# setup directory paths
_opts = get_input_config('result')
save_dir = os.path.join(proj_dir, _opts['save_dir'])
clf_plot_dir = save_dir + _opts['clf_plot_dir']
clf_result_dir = save_dir + _opts['clf_result_dir']
feature_plot_dir = save_dir + _opts['feature_plot_dir']
def make_result_dir(save_dir):
global clf_plot_dir, clf_result_dir, feature_plot_dir
if save_dir is not None:
_opts = get_input_config('result')
clf_plot_dir = os.path.join(save_dir, _opts['clf_plot_dir'])
clf_result_dir = os.path.join(save_dir, _opts['clf_result_dir'])
if not os.path.exists(clf_plot_dir): os.makedirs(clf_plot_dir)
if not os.path.exists(clf_result_dir): os.makedirs(clf_result_dir)
def start_logging(log_file='', log_level='INFO', verbose=False):
"""Start logging information into the log directory.
If os.devnull is specified as the log_file then the log file will
not actually be written to a file.
"""
if not log_file:
# create log directory if it doesn't exist
log_dir = os.path.abspath('log') + '/'
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
# path to new log file
log_file = log_dir + 'log.run.' + str(datetime.datetime.now()).replace(':', '.') + '.txt'
# logger options
lvl = logging.DEBUG if log_level.upper() == 'DEBUG' else logging.INFO
# ignore warnings if not in debug
if log_level.upper() != 'DEBUG':
warnings.filterwarnings('ignore', category=DeprecationWarning)
# scikit-learn ignores warning so need to alter
# warning function to always return None
def noerror(*arg, **kwargs): return None
warnings.warn = noerror
# define logging format
if verbose:
myformat = '%(asctime)s - %(name)s - %(levelname)s \n>>> %(message)s'
else:
myformat = '%(message)s'
# create logger
if not log_file == 'stdout':
# normal logging to a regular file
logging.basicConfig(level=lvl,
format=myformat,
filename=log_file,
filemode='w')
else:
# logging to stdout
root = logging.getLogger()
root.setLevel(lvl)
stdout_stream = logging.StreamHandler(sys.stdout)
stdout_stream.setLevel(lvl)
formatter = logging.Formatter(myformat)
stdout_stream.setFormatter(formatter)
root.addHandler(stdout_stream)
root.propagate = True
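# Hedged usage sketch (not part of 2020plus): route log output to stdout at
# DEBUG verbosity instead of writing a timestamped file under log/.
def demo_logging_to_stdout():
    start_logging(log_file='stdout', log_level='DEBUG', verbose=True)
    logging.getLogger(__name__).debug('logging configured')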
def log_error_decorator(f):
"""Writes exception to log file if occured in decorated function.
This decorator wrapper is needed for multiprocess logging since otherwise
the python multiprocessing module will obscure the actual line of the error.
"""
@wraps(f)
def wrapper(*args, **kwds):
try:
result = f(*args, **kwds)
return result
except KeyboardInterrupt:
logger.info('Ctrl-C stopped a process.')
except Exception as e:
logger.exception(e)
raise
return wrapper
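# Hedged usage sketch (not part of 2020plus): decorate the function handed to
# a multiprocessing pool so that any exception is logged with its original
# traceback; `demo_square` is a hypothetical worker used only for illustration.
@log_error_decorator
def demo_square(x):
    return x * x
def demo_pool_usage():
    import multiprocessing
    pool = multiprocessing.Pool(2)
    try:
        return pool.map(demo_square, range(10))
    finally:
        pool.close()
        pool.join()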
def keyboard_exit_wrapper(func):
def wrap(self, timeout=None):
# Note: the timeout of 1 googol seconds introduces a rather subtle
# bug for Python scripts intended to run many times the age of the universe.
return func(self, timeout=timeout if timeout is not None else 1e100)
return wrap
def read_oncogenes():
"""Reads in the oncogenes from vogelsteins' science paper.
Oncogenes from supplementary 2A:
http://www.sciencemag.org/content/339/6127/1546.full
Returns
-------
oncogenes : tuple
tuple of gene names considered oncogenes
"""
cfg_opts = get_input_config('input')
onco_path = os.path.join(proj_dir, cfg_opts['oncogene'])
with open(onco_path, 'r') as handle:
oncogenes = tuple(gene.strip() for gene in handle.readlines())
return oncogenes
def read_tsgs():
"""Reads in the tumor suppressor genes from vogelsteins' science paper.
TSGs from supplementary 2A:
http://www.sciencemag.org/content/339/6127/1546.full
Returns
-------
tsgs : tuple
tuple of gene names considered as tumor suppressors
"""
cfg_opts = get_input_config('input')
tsg_path = os.path.join(proj_dir, cfg_opts['tsg'])
with open(tsg_path, 'r') as handle:
tsgs = tuple(gene.strip() for gene in handle.readlines())
return tsgs
def classify_gene(gene):
"""Return whether the gene is an oncogene, tsg, or other.
Parameters
----------
gene : str
Official gene name
Returns
-------
Str, ['oncogene' | 'tsg' | 'other']
"""
if gene in oncogene_set:
return 'oncogene'
elif gene in tsg_set:
return 'tsg'
else:
return 'other'
def get_mutation_types(mut_iterable,
dna_series=None,
known_type=None,
kind='amino acid'):
"""Classify each protein HGVS mutation as a certain type.
Parameters
----------
mut_iterable : iterable
iterable container with HGVS mutation strings. If amino acids,
a second dna_series is needed to identify splice mutations.
dna_series : pd.Series
optional, only required to find splice mutations when a list of
amino acids is given for mut_iterable
known_type : pd.Series
contains list of mutation types
Returns
-------
mut_type_series : pd.Series
container of protein mutation types in same order as input
"""
mut_type = []
if kind == 'amino acid':
if dna_series is None:
# dna iterable required
raise ValueError('DNA should be specified to identify splice mutations.')
for i, hgvs_aa in enumerate(mut_iterable):
aa = AminoAcid(hgvs=hgvs_aa)
nuc = Nucleotide(hgvs=dna_series.iloc[i])
if nuc.is_splicing_mutation:
# check if mutation in splice site
mut_type.append('Splice_Site')
elif known_type is not None and known_type.iloc[i]=='Splice_Site':
mut_type.append('Splice_Site')
else:
# if not in splice site, just add
mut_type.append(aa.mutation_type)
elif kind == 'nucleotide':
for hgvs_nuc in mut_iterable:
nuc = Nucleotide(hgvs=hgvs_nuc)
mut_type.append(nuc.mutation_type)
mut_type_series = pd.Series(mut_type)
return mut_type_series
def count_mutation_types(hgvs_iterable, dna_series=None, known_type=None, kind='amino acid'):
"""Count mutation types from HGVS protein strings (missense, indels, etc.)
and DNA strings (substitutions, indels).
Parameters
----------
hgvs_iterable : iterable
An iterable object containing protein HGVS
dna_series : pd.Series
contains HGVS DNA mutations used to classify splice mutations
for amino acids. Only required if hgvs_iterable is AA mutations.
known_type : pd.Series
known mutation consequence type
Returns
-------
unique_cts : pd.Series
A pandas series object counting protein mutation types
"""
mut_type_series = get_mutation_types(hgvs_iterable,
dna_series=dna_series,
known_type=known_type,
kind=kind) # get mutation types
unique_cts = mut_type_series.value_counts() # count mutation types
return unique_cts
def get_output_config(section):
"""Returns the config object to output.cfg."""
cfg = ConfigParser.ConfigParser()
cfg.read(config_dir + 'output.cfg')
cfg_options = dict(cfg.items(section))
return cfg_options
def get_db_config(section):
"""Return the config object to db.cfg."""
cfg = ConfigParser.ConfigParser()
cfg.read(config_dir + 'db.cfg')
cfg_options = dict(cfg.items(section))
return cfg_options
def read_cosmic_tsv_by_gene(gene_name):
"""Reads the stored flat file corresponding to the gene_name.
NOTE: Assumes cosmic flat files are in cosmic_dir specified by input.cfg
and are sorted into alphabetical directories (eg. 'A'...'Z').
Parameters
----------
gene_name : str
gene name
Returns
-------
df : pd.DataFrame
tsv file as a pandas dataframe
"""
cfg_opt = get_input_config('input')
database_dir = cfg_opt['cosmic_dir'] # COSMIC_nuc database directory
gene_dir = gene_name[0].upper() + '/' # gene tsv in alphabetical directory listing
tsv_path = database_dir + gene_dir + gene_name + '.tsv' # path to tsv file
df = pd.read_csv(tsv_path, sep='\t')
return df
def drop_table(tbl_name,
genes_db_path='',
kind='sqlite'):
"""Drop a table from database if exists.
**Note:** This function was written because pandas has a bug.
If pandas was working then the write_frame method could just
replace existing contents without the need for me to drop the
table. The bug is found here:
https://github.com/pydata/pandas/issues/2971
Parameters
----------
tbl_name : str
name of table to drop
kind : str, ['sqlite' | 'mysql']
type of database
"""
if not genes_db_path:
# if db not specified, use config file
genes_db_path = get_db_config('2020plus')['db']
if kind == 'sqlite':
with sqlite3.connect(genes_db_path) as cur:
sql = "DROP TABLE IF EXISTS %s" % tbl_name
cur.execute(sql)
elif kind == 'mysql':
raise NotImplementedError('MySQL is not currently supported')
#with get_cosmic_db() as cur:
#sql = "DROP TABLE IF EXISTS %s" % tbl_name
#cur.execute(sql)
def create_empty_table(tbl_name, db_path, colnames, coltypes):
# drop table if exists
drop_table(tbl_name, db_path, kind='sqlite')
# make empty maf_mutation table
conn = sqlite3.connect(db_path) # open connection
cur = conn.cursor()
col_info_list = [' '.join(x) for x in zip(colnames, coltypes)]
col_info_str = ', '.join(col_info_list)
sql = "CREATE TABLE {0}({1});".format(tbl_name, col_info_str)
cur.execute(sql)
conn.commit()
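# Hedged usage sketch (not part of 2020plus): create a small empty table in a
# throw-away SQLite file; the path and column specification are illustrative.
def demo_create_empty_table():
    create_empty_table('demo_tbl', '/tmp/demo_2020plus.db',
                       colnames=['gene', 'score'],
                       coltypes=['TEXT', 'REAL'])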
def get_gene_length():
# get db
db_path = get_db_config('2020plus')['db']
# query for gene length
conn = sqlite3.connect(db_path)
sql = "SELECT gene, gene_length FROM gene_features"
df = psql.frame_query(sql, con=conn)
df = df.set_index('gene')
conn.close()
return df
# set up vogelstein oncogenes/tsgs
oncogene_list = read_oncogenes()
tsg_list = read_tsgs()
oncogene_set = set(oncogene_list)
tsg_set = set(tsg_list)
|
apache-2.0
|
Refefer/pylearn2
|
pylearn2/train_extensions/roc_auc.py
|
15
|
4888
|
"""
TrainExtension subclass for calculating ROC AUC scores on monitoring
dataset(s), reported via monitor channels.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"
import numpy as np
try:
from sklearn.metrics import roc_auc_score
except ImportError:
roc_auc_score = None
import theano
from theano import gof, config
from theano import tensor as T
from pylearn2.train_extensions import TrainExtension
class RocAucScoreOp(gof.Op):
"""
Theano Op wrapping sklearn.metrics.roc_auc_score.
Parameters
----------
name : str, optional (default 'roc_auc')
Name of this Op.
use_c_code : WRITEME
"""
def __init__(self, name='roc_auc', use_c_code=theano.config.cxx):
super(RocAucScoreOp, self).__init__(use_c_code)
self.name = name
def make_node(self, y_true, y_score):
"""
Calculate ROC AUC score.
Parameters
----------
y_true : tensor_like
Target class labels.
y_score : tensor_like
Predicted class labels or probabilities for positive class.
"""
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.scalar(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
def perform(self, node, inputs, output_storage):
"""
Calculate ROC AUC score.
Parameters
----------
node : Apply instance
Symbolic inputs and outputs.
inputs : list
Sequence of inputs.
output_storage : list
List of mutable 1-element lists.
"""
if roc_auc_score is None:
raise RuntimeError("Could not import from sklearn.")
y_true, y_score = inputs
try:
roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
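# Hedged usage sketch (not part of pylearn2): compiling RocAucScoreOp into a
# callable that scores a batch of integer labels and predicted probabilities;
# the helper name is ours.
def demo_roc_auc_op():
    y_true = T.ivector('y_true')
    y_score = T.vector('y_score')
    auc = RocAucScoreOp()(y_true, y_score)
    f = theano.function([y_true, y_score], auc, allow_input_downcast=True)
    return f([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])  # -> approximately 0.75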
class RocAucChannel(TrainExtension):
"""
Adds a ROC AUC channel to the monitor for each monitoring dataset.
This monitor will return nan unless both classes are represented in
y_true. For this reason, it is recommended to set monitoring_batches
to 1, especially when using unbalanced datasets.
Parameters
----------
channel_name_suffix : str, optional (default 'roc_auc')
Channel name suffix.
positive_class_index : int, optional (default 1)
Index of positive class in predicted values.
negative_class_index : int or None, optional (default None)
Index of negative class in predicted values for calculation of
one vs. one performance. If None, uses all examples not in the
positive class (one vs. the rest).
"""
def __init__(self, channel_name_suffix='roc_auc', positive_class_index=1,
negative_class_index=None):
self.channel_name_suffix = channel_name_suffix
self.positive_class_index = positive_class_index
self.negative_class_index = negative_class_index
def setup(self, model, dataset, algorithm):
"""
Add ROC AUC channels for monitoring dataset(s) to model.monitor.
Parameters
----------
model : object
The model being trained.
dataset : object
Training dataset.
algorithm : object
Training algorithm.
"""
m_space, m_source = model.get_monitoring_data_specs()
state, target = m_space.make_theano_batch()
y = T.argmax(target, axis=1)
y_hat = model.fprop(state)[:, self.positive_class_index]
# one vs. the rest
if self.negative_class_index is None:
y = T.eq(y, self.positive_class_index)
# one vs. one
else:
pos = T.eq(y, self.positive_class_index)
neg = T.eq(y, self.negative_class_index)
keep = T.add(pos, neg).nonzero()
y = T.eq(y[keep], self.positive_class_index)
y_hat = y_hat[keep]
roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat)
roc_auc = T.cast(roc_auc, config.floatX)
for dataset_name, dataset in algorithm.monitoring_dataset.items():
if dataset_name:
channel_name = '{0}_{1}'.format(dataset_name,
self.channel_name_suffix)
else:
channel_name = self.channel_name_suffix
model.monitor.add_channel(name=channel_name,
ipt=(state, target),
val=roc_auc,
data_specs=(m_space, m_source),
dataset=dataset)
|
bsd-3-clause
|
limitlessv/osx-dev-setup
|
bootstrap/init/profile_pyspark/ipython_qtconsole_config.py
|
1
|
24677
|
# Configuration file for ipython-qtconsole.
c = get_config()
#------------------------------------------------------------------------------
# IPythonQtConsoleApp configuration
#------------------------------------------------------------------------------
# IPythonQtConsoleApp will inherit config from: BaseIPythonApplication,
# Application, IPythonConsoleApp, ConnectionFileMixin
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPythonQtConsoleApp.ip = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPythonQtConsoleApp.verbose_crash = False
# Start the console window maximized.
# c.IPythonQtConsoleApp.maximize = False
# The date format used by logging formatters for %(asctime)s
# c.IPythonQtConsoleApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPythonQtConsoleApp.shell_port = 0
# The SSH server to use to connect to the kernel.
# c.IPythonQtConsoleApp.sshserver = ''
# set the stdin (DEALER) port [default: random]
# c.IPythonQtConsoleApp.stdin_port = 0
# Set the log level by value or name.
# c.IPythonQtConsoleApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.IPythonQtConsoleApp.sshkey = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPythonQtConsoleApp.extra_config_file = u''
# Whether to create profile dir if it doesn't exist
# c.IPythonQtConsoleApp.auto_create = False
# path to a custom CSS stylesheet
# c.IPythonQtConsoleApp.stylesheet = ''
# set the heartbeat port [default: random]
# c.IPythonQtConsoleApp.hb_port = 0
# Whether to overwrite existing config files when copying
# c.IPythonQtConsoleApp.overwrite = False
# set the iopub (PUB) port [default: random]
# c.IPythonQtConsoleApp.iopub_port = 0
# The IPython profile to use.
# c.IPythonQtConsoleApp.profile = u'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPythonQtConsoleApp.connection_file = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.IPythonQtConsoleApp.confirm_exit = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPythonQtConsoleApp.ipython_dir = u''
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPythonQtConsoleApp.copy_config_files = False
# Connect to an already running kernel
# c.IPythonQtConsoleApp.existing = ''
# Use a plaintext widget instead of rich text (plain can't print/save).
# c.IPythonQtConsoleApp.plain = False
# Start the console window with the menu bar hidden.
# c.IPythonQtConsoleApp.hide_menubar = False
# The Logging format template
# c.IPythonQtConsoleApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#
# c.IPythonQtConsoleApp.transport = 'tcp'
#------------------------------------------------------------------------------
# IPythonWidget configuration
#------------------------------------------------------------------------------
# A FrontendWidget for an IPython kernel.
# IPythonWidget will inherit config from: FrontendWidget, HistoryConsoleWidget,
# ConsoleWidget
# The type of completer to use. Valid values are:
#
# 'plain' : Show the available completion as a text list
# Below the editing area.
# 'droplist': Show the completion in a drop down list navigable
# by the arrow keys, and from which you can select
# completion by pressing Return.
# 'ncurses' : Show the completion as a text list which is navigable by
# `tab` and arrow keys.
# c.IPythonWidget.gui_completion = 'ncurses'
# Whether to process ANSI escape codes.
# c.IPythonWidget.ansi_codes = True
# A CSS stylesheet. The stylesheet can contain classes for:
# 1. Qt: QPlainTextEdit, QFrame, QWidget, etc
# 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
# 3. IPython: .error, .in-prompt, .out-prompt, etc
# c.IPythonWidget.style_sheet = u''
# The height of the console at start time in number of characters (will double
# with `vsplit` paging)
# c.IPythonWidget.height = 25
#
# c.IPythonWidget.out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
#
# c.IPythonWidget.input_sep = '\n'
# Whether to draw information calltips on open-parentheses.
# c.IPythonWidget.enable_calltips = True
#
# c.IPythonWidget.in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
# The width of the console at start time in number of characters (will double
# with `hsplit` paging)
# c.IPythonWidget.width = 81
# A command for invoking a system text editor. If the string contains a
# {filename} format specifier, it will be used. Otherwise, the filename will be
# appended to the end of the command.
# c.IPythonWidget.editor = ''
# If not empty, use this Pygments style for syntax highlighting. Otherwise, the
# style sheet is queried for Pygments style information.
# c.IPythonWidget.syntax_style = u''
# The font family to use for the console. On OSX this defaults to Monaco, on
# Windows the default is Consolas with fallback of Courier, and on other
# platforms the default is Monospace.
# c.IPythonWidget.font_family = u''
# The pygments lexer class to use.
# c.IPythonWidget.lexer_class = <IPython.utils.traitlets.Undefined object at 0x10258ded0>
#
# c.IPythonWidget.output_sep2 = ''
# Whether to automatically execute on syntactically complete input.
#
# If False, Shift-Enter is required to submit each execution. Disabling this is
# mainly useful for non-Python kernels, where the completion check would be
# wrong.
# c.IPythonWidget.execute_on_complete_input = True
# The maximum number of lines of text before truncation. Specifying a non-
# positive number disables text truncation (not recommended).
# c.IPythonWidget.buffer_size = 500
#
# c.IPythonWidget.history_lock = False
#
# c.IPythonWidget.banner = u''
# The type of underlying text widget to use. Valid values are 'plain', which
# specifies a QPlainTextEdit, and 'rich', which specifies a QTextEdit.
# c.IPythonWidget.kind = 'plain'
# Whether to ask for user confirmation when restarting kernel
# c.IPythonWidget.confirm_restart = True
# The font size. If unconfigured, Qt will be entrusted with the size of the
# font.
# c.IPythonWidget.font_size = 0
# The editor command to use when a specific line number is requested. The string
# should contain two format specifiers: {line} and {filename}. If this parameter
# is not specified, the line number option to the %edit magic will be ignored.
# c.IPythonWidget.editor_line = u''
# Whether to clear the console when the kernel is restarted
# c.IPythonWidget.clear_on_kernel_restart = True
# The type of paging to use. Valid values are:
#
# 'inside'
# The widget pages like a traditional terminal.
# 'hsplit'
# When paging is requested, the widget is split horizontally. The top
# pane contains the console, and the bottom pane contains the paged text.
# 'vsplit'
# Similar to 'hsplit', except that a vertical splitter is used.
# 'custom'
# No action is taken by the widget beyond emitting a
# 'custom_page_requested(str)' signal.
# 'none'
# The text is written directly to the console.
# c.IPythonWidget.paging = 'inside'
#
# c.IPythonWidget.output_sep = ''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'dmartin'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
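# Example (illustrative override; any commented default above can be enabled
# by uncommenting it): to emit both PNG and SVG inline figures with a tight
# bounding box, one might set:
# c.InlineBackend.figure_formats = set(['png', 'svg'])
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}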
|
mit
|
deepakantony/sms-tools
|
lectures/04-STFT/plots-code/stft-system.py
|
5
|
1461
|
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.hamming(1024)
N = 1024
H = 512
mX, pX = STFT.stftAnal(x, w, N, H)
y = STFT.stftSynth(mX, pX, w.size, H)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(411)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.title('x (piano.wav)')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.subplot(412)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX, M=1024, N=1024, H=512')
plt.autoscale(tight=True)
plt.subplot(413)
numFrames = int(pX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(pX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.diff(np.transpose(pX),axis=0))
plt.title('pX derivative, M=1024, N=1024, H=512')
plt.autoscale(tight=True)
plt.subplot(414)
plt.plot(np.arange(y.size)/float(fs), y,'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('stft-system.png')
UF.wavwrite(y, fs, 'piano-stft.wav')
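# Illustrative sanity check (assumption: stftSynth returns a signal that is
# time-aligned with the input for this window/hop choice); the error should be
# negligible for a well-behaved analysis/synthesis pair.
n = min(x.size, y.size)
reconstruction_error = np.max(np.abs(x[:n] - y[:n]))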
plt.show()
|
agpl-3.0
|
ssaeger/scikit-learn
|
sklearn/feature_extraction/text.py
|
7
|
50272
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
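# Illustrative example (assumed output for this input):
#   >>> strip_accents_unicode(u'\xe9t\xe9')  # u'été'
#   u'ete'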
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
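# Illustrative example (assumed output; characters with no ASCII counterpart
# would simply be dropped):
#   >>> strip_accents_ascii(u'\xe9l\xe8ve')  # u'élève'
#   u'eleve'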
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
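# Illustrative example (assumed output; each tag is replaced by a space):
#   >>> strip_tags('<p>hello</p>')
#   ' hello '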
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
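# Illustrative behaviour of _word_ngrams (assuming ngram_range=(1, 2) and no
# stop words): the tokens ['quick', 'brown', 'fox'] would be expanded to
# ['quick', 'brown', 'fox', 'quick brown', 'brown fox'].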
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
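# Illustrative behaviour of _char_wb_ngrams (assuming ngram_range=(3, 3)): the
# word 'fox' is padded to ' fox ' and yields [' fo', 'fox', 'ox '], so n-grams
# never straddle a word boundary.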
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
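# Illustrative usage of HashingVectorizer (a sketch; the nonzero values depend
# on the hash function, so only the output shape is shown):
#   >>> hv = HashingVectorizer(n_features=2 ** 10)
#   >>> X = hv.transform(['the quick brown fox', 'jumped over the lazy dog'])
#   >>> X.shape
#   (2, 1024)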
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
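# Illustrative usage of CountVectorizer (a doctest-style sketch; reprs shown
# as they would appear under Python 2):
#   >>> cv = CountVectorizer()
#   >>> X = cv.fit_transform(['the cat sat', 'the cat sat on the mat'])
#   >>> cv.get_feature_names()
#   [u'cat', u'mat', u'on', u'sat', u'the']
#   >>> X.toarray()
#   array([[1, 0, 0, 1, 1],
#          [1, 1, 1, 1, 2]])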
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
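# Illustrative usage of TfidfVectorizer (a doctest-style sketch; the exact
# weights follow the smoothing and l2-normalisation defaults documented above):
#   >>> tv = TfidfVectorizer()
#   >>> X = tv.fit_transform(['the cat sat', 'the dog sat'])
#   >>> X.shape
#   (2, 4)
#   >>> tv.get_feature_names()
#   [u'cat', u'dog', u'sat', u'the']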
|
bsd-3-clause
|
wittrup/crap
|
python/kalman.py
|
1
|
2533
|
import numpy as np
import matplotlib.pyplot as plt
def kalman_xy(x, P, measurement, R,
motion = np.matrix('0. 0. 0. 0.').T,
Q = np.matrix(np.eye(4))):
"""
Parameters:
x: initial state 4-tuple of location and velocity: (x0, x1, x0_dot, x1_dot)
P: initial uncertainty covariance matrix
measurement: observed position
R: measurement noise
motion: external motion added to state vector x
Q: motion noise (same shape as P)
"""
return kalman(x, P, measurement, R, motion, Q,
F = np.matrix('''
1. 0. 1. 0.;
0. 1. 0. 1.;
0. 0. 1. 0.;
0. 0. 0. 1.
'''),
H = np.matrix('''
1. 0. 0. 0.;
0. 1. 0. 0.'''))
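# Note on the matrices above: F is the state-transition matrix of a
# constant-velocity model with a unit time step (each position component is
# incremented by its velocity), and H is the measurement matrix that extracts
# the observed (x0, x1) position from the 4-dimensional state.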
def kalman(x, P, measurement, R, motion, Q, F, H):
'''
Parameters:
x: initial state
P: initial uncertainty covariance matrix
measurement: observed position (same shape as H*x)
R: measurement noise (same shape as H)
motion: external motion added to state vector x
Q: motion noise (same shape as P)
F: next state function: x_prime = F*x
H: measurement function: position = H*x
Return: the updated and predicted new values for (x, P)
See also http://en.wikipedia.org/wiki/Kalman_filter
This version of kalman can be applied to many different situations by
appropriately defining F and H
'''
# UPDATE x, P based on measurement m
# distance between measured and current position-belief
y = np.matrix(measurement).T - H * x
S = H * P * H.T + R  # residual covariance
K = P * H.T * S.I # Kalman gain
x = x + K*y
I = np.matrix(np.eye(F.shape[0])) # identity matrix
P = (I - K*H)*P
# PREDICT x, P based on motion
x = F*x + motion
P = F*P*F.T + Q
return x, P
def demo_kalman_xy():
x = np.matrix('0. 0. 0. 0.').T
P = np.matrix(np.eye(4))*1000 # initial uncertainty
N = 20
true_x = np.linspace(0.0, 10.0, N)
true_y = true_x**2
observed_x = true_x + 0.05*np.random.random(N)*true_x
observed_y = true_y + 0.05*np.random.random(N)*true_y
plt.plot(observed_x, observed_y, 'ro')
result = []
R = 0.01**2
for meas in zip(observed_x, observed_y):
x, P = kalman_xy(x, P, meas, R)
result.append((x[:2]).tolist())
kalman_x, kalman_y = zip(*result)
plt.plot(kalman_x, kalman_y, 'g-')
plt.show()
demo_kalman_xy()
|
mit
|
sepehr125/pybrain
|
examples/rl/environments/linear_fa/bicycle.py
|
26
|
14462
|
from __future__ import print_function
"""An attempt to implement Randlov and Alstrom (1998). They successfully
use reinforcement learning to balance a bicycle, and to control it to drive
to a specified goal location. Their work has been used since then by a few
researchers as a benchmark problem.
We only implement the balance task. This implementation differs at least
slightly, since Randlov and Alstrom did not mention anything about how they
annealed/decayed their learning rate, etc. As a result of these differences,
the results do not match those obtained by Randlov and Alstrom.
"""
__author__ = 'Chris Dembia, Bruce Cam, Johnny Israeli'
from scipy import asarray
import numpy as np
from numpy import sin, cos, tan, sqrt, arcsin, arctan, sign, clip, argwhere
from matplotlib import pyplot as plt
import pybrain.rl.environments
from pybrain.rl.environments.environment import Environment
from pybrain.rl.learners.valuebased.linearfa import SARSALambda_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.utilities import one_to_n
class BicycleEnvironment(Environment):
"""Randlov and Alstrom's bicycle model. This code matches nearly exactly
some c code we found online for simulating Randlov and Alstrom's
bicycle. The bicycle travels at a fixed speed.
"""
# For superclass.
indim = 2
outdim = 10
# Environment parameters.
time_step = 0.01
# Goal position and radius
# Lagouakis (2002) uses angle to goal, not heading, as a state
max_distance = 1000.
# Acceleration on Earth's surface due to gravity (m/s^2):
g = 9.82
# See the paper for a description of these quantities:
# Distances (in meters):
c = 0.66
dCM = 0.30
h = 0.94
L = 1.11
r = 0.34
# Masses (in kilograms):
Mc = 15.0
Md = 1.7
Mp = 60.0
# Velocity of a bicycle (in meters per second), equal to 10 km/h:
v = 10.0 * 1000.0 / 3600.0
# Derived constants.
M = Mc + Mp # See Randlov's code.
Idc = Md * r**2
Idv = 1.5 * Md * r**2
Idl = 0.5 * Md * r**2
Itot = 13.0 / 3.0 * Mc * h**2 + Mp * (h + dCM)**2
sigmad = v / r
def __init__(self):
Environment.__init__(self)
self.reset()
self.actions = [0.0, 0.0]
self._save_wheel_contact_trajectories = False
def performAction(self, actions):
self.actions = actions
self.step()
def saveWheelContactTrajectories(self, opt):
self._save_wheel_contact_trajectories = opt
def step(self):
# Unpack the state and actions.
# -----------------------------
# Want to ignore the previous value of omegadd; it could only cause a
# bug if we assign to it.
(theta, thetad, omega, omegad, _,
xf, yf, xb, yb, psi) = self.sensors
(T, d) = self.actions
# For recordkeeping.
# ------------------
if self._save_wheel_contact_trajectories:
self.xfhist.append(xf)
self.yfhist.append(yf)
self.xbhist.append(xb)
self.ybhist.append(yb)
# Intermediate time-dependent quantities.
# ---------------------------------------
# Avoid divide-by-zero, just as Randlov did.
if theta == 0:
rf = 1e8
rb = 1e8
rCM = 1e8
else:
rf = self.L / np.abs(sin(theta))
rb = self.L / np.abs(tan(theta))
rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)
phi = omega + np.arctan(d / self.h)
# Equations of motion.
# --------------------
        # Angular acceleration (second time derivative of the tilt angle omega):
omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
- cos(phi) * (self.Idc * self.sigmad * thetad
+ sign(theta) * self.v**2 * (
self.Md * self.r * (1.0 / rf + 1.0 / rb)
+ self.M * self.h / rCM)))
thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl
# Integrate equations of motion using Euler's method.
# ---------------------------------------------------
# yt+1 = yt + yd * dt.
# Must update omega based on PREVIOUS value of omegad.
omegad += omegadd * self.time_step
omega += omegad * self.time_step
thetad += thetadd * self.time_step
theta += thetad * self.time_step
# Handlebars can't be turned more than 80 degrees.
theta = np.clip(theta, -1.3963, 1.3963)
# Wheel ('tyre') contact positions.
# ---------------------------------
# Front wheel contact position.
front_temp = self.v * self.time_step / (2 * rf)
# See Randlov's code.
if front_temp > 1:
front_temp = sign(psi + theta) * 0.5 * np.pi
else:
front_temp = sign(psi + theta) * arcsin(front_temp)
xf += self.v * self.time_step * -sin(psi + theta + front_temp)
yf += self.v * self.time_step * cos(psi + theta + front_temp)
# Rear wheel.
back_temp = self.v * self.time_step / (2 * rb)
# See Randlov's code.
if back_temp > 1:
back_temp = np.sign(psi) * 0.5 * np.pi
else:
back_temp = np.sign(psi) * np.arcsin(back_temp)
xb += self.v * self.time_step * -sin(psi + back_temp)
yb += self.v * self.time_step * cos(psi + back_temp)
# Preventing numerical drift.
# ---------------------------
# Copying what Randlov did.
current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
if np.abs(current_wheelbase - self.L) > 0.01:
relative_error = self.L / current_wheelbase - 1.0
xb += (xb - xf) * relative_error
yb += (yb - yf) * relative_error
# Update heading, psi.
# --------------------
delta_y = yf - yb
if (xf == xb) and delta_y < 0.0:
psi = np.pi
else:
if delta_y > 0.0:
psi = arctan((xb - xf) / delta_y)
else:
psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
def reset(self):
theta = 0
thetad = 0
omega = 0
omegad = 0
omegadd = 0
xf = 0
yf = self.L
xb = 0
yb = 0
psi = np.arctan((xb - xf) / (yf - yb))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
self.xfhist = []
self.yfhist = []
self.xbhist = []
self.ybhist = []
def getSteer(self):
return self.sensors[0]
def getTilt(self):
return self.sensors[2]
def get_xfhist(self):
return self.xfhist
def get_yfhist(self):
return self.yfhist
def get_xbhist(self):
return self.xbhist
def get_ybhist(self):
return self.ybhist
def getSensors(self):
return self.sensors
class BalanceTask(pybrain.rl.environments.EpisodicTask):
"""The rider is to simply balance the bicycle while moving with the
    speed prescribed in the environment. This class uses a continuous
    5-dimensional state space, and a discrete action space.
This class is heavily guided by
pybrain.rl.environments.cartpole.balancetask.BalanceTask.
"""
max_tilt = np.pi / 6.
nactions = 9
def __init__(self, max_time=1000.0):
super(BalanceTask, self).__init__(BicycleEnvironment())
self.max_time = max_time
# Keep track of time in case we want to end episodes based on number of
# time steps.
self.t = 0
@property
def indim(self):
return 1
@property
def outdim(self):
return 5
def reset(self):
super(BalanceTask, self).reset()
self.t = 0
def performAction(self, action):
"""Incoming action is an int between 0 and 8. The action we provide to
the environment consists of a torque T in {-2 N, 0, 2 N}, and a
displacement d in {-.02 m, 0, 0.02 m}.
"""
self.t += 1
assert round(action[0]) == action[0]
# -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
# action in {6, 7, 8}
torque_selector = np.floor(action[0] / 3.0) - 1.0
T = 2 * torque_selector
# Random number in [-1, 1]:
p = 2.0 * np.random.rand() - 1.0
# -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
# action in {2, 5, 8}
disp_selector = action[0] % 3 - 1.0
d = 0.02 * disp_selector + 0.02 * p
super(BalanceTask, self).performAction([T, d])
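        # Worked example (illustrative only): for action = 7,
        # torque_selector = floor(7 / 3.0) - 1 = 1, so T = +2 N, while
        # disp_selector = 7 % 3 - 1 = 0, so d = 0.02 * p (the noise term only).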
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
return self.env.getSensors()[0:5]
def isFinished(self):
# Criterion for ending an episode. From Randlov's paper:
# "When the agent can balance for 1000 seconds, the task is considered
# learned."
if np.abs(self.env.getTilt()) > self.max_tilt:
return True
elapsed_time = self.env.time_step * self.t
if elapsed_time > self.max_time:
return True
return False
def getReward(self):
# -1 reward for falling over; no reward otherwise.
if np.abs(self.env.getTilt()) > self.max_tilt:
return -1.0
return 0.0
class LinearFATileCoding3456BalanceTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the state space into 3456 bins. We use the same action
space as in the superclass.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
# This array, when dotted with the 5-dim state vector, gives a 'linear'
# index between 0 and 3455.
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
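    # Worked example (illustrative only): the bin counts per dimension are
    # [6, 4, 6, 6, 4], so magic_array is [1, 6, 24, 144, 864] and bin indices
    # (1, 2, 0, 3, 1) map to 1*1 + 2*6 + 0*24 + 3*144 + 1*864 = 1309.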
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
return np.dot(self.magic_array, bin_indices)
def getBinIndices(self, linear_index):
"""Given a linear index (integer between 0 and outdim), returns the bin
indices for each of the state dimensions.
"""
        return linear_index // self.magic_array % self.nbins_across_dims
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
return state
class SARSALambda_LinFA_ReplacingTraces(SARSALambda_LinFA):
"""Randlov used replacing traces, but this doesn't exist in PyBrain's
SARSALambda.
"""
def _updateEtraces(self, state, action, responsibility=1.):
self._etraces *= self.rewardDiscount * self._lambda * responsibility
# This assumes that state is an identity vector (like, from one_to_n).
self._etraces[action] = clip(self._etraces[action] + state, -np.inf, 1.)
# Set the trace for all other actions in this state to 0:
action_bit = one_to_n(action, self.num_actions)
        for argstate in argwhere(state == 1):
self._etraces[argwhere(action_bit != 1), argstate] = 0.
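        # Worked example (illustrative only, assuming rewardDiscount = 0.9 and
        # lambda = 0.95): an existing trace of 0.8 decays to 0.8 * 0.855 = 0.684,
        # adding the active state feature gives 1.684, and the clip caps it at
        # 1.0; that cap is what makes the traces replacing rather than
        # accumulating.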
task = LinearFATileCoding3456BalanceTask()
env = task.env
# The learning is very sensitive to the learning rate decay.
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim,
learningRateDecay=2000)
learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
agent.logging = False
exp = EpisodicExperiment(task, agent)
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False
env.saveWheelContactTrajectories(True)
plt.ion()
plt.figure(figsize=(8, 4))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
def update_wheel_trajectories():
front_lines = ax2.plot(env.get_xfhist(), env.get_yfhist(), 'r')
back_lines = ax2.plot(env.get_xbhist(), env.get_ybhist(), 'b')
plt.axis('equal')
perform_cumrewards = []
for irehearsal in range(7000):
# Learn.
# ------
r = exp.doEpisodes(1)
# Discounted reward.
cumreward = exp.task.getTotalReward()
#print 'cumreward: %.4f; nsteps: %i; learningRate: %.4f' % (
# cumreward, len(r[0]), exp.agent.learner.learningRate)
if irehearsal % 50 == 0:
# Perform (no learning).
# ----------------------
# Swap out the agent.
exp.agent = performance_agent
# Perform.
r = exp.doEpisodes(1)
perform_cumreward = task.getTotalReward()
perform_cumrewards.append(perform_cumreward)
print('PERFORMANCE: cumreward:', perform_cumreward, 'nsteps:', len(r[0]))
# Swap back the learning agent.
performance_agent.reset()
exp.agent = agent
ax1.cla()
ax1.plot(perform_cumrewards, '.--')
# Wheel trajectories.
update_wheel_trajectories()
plt.pause(0.001)
|
bsd-3-clause
|
alphaBenj/zipline
|
zipline/assets/synthetic.py
|
3
|
9273
|
from itertools import product
from string import ascii_uppercase
import pandas as pd
from pandas.tseries.offsets import MonthBegin
from six import iteritems
from .futures import CME_CODE_TO_MONTH
def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': 'TEST',
'exchange_full': 'TEST FULL',
},
index=range(num_assets),
)
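# A minimal usage sketch; the helper name and the parameter values below are
# illustrative only, not part of the public API.
def _example_rotating_equity_info():
    return make_rotating_equity_info(
        num_assets=4,
        first_start=pd.Timestamp('2015-01-05', tz='UTC'),
        frequency=pd.Timedelta(days=1),
        periods_between_starts=5,
        asset_lifetime=10,
    )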
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
    start_date : pd.Timestamp
    end_date : pd.Timestamp
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': 'TEST',
'exchange_full': 'TEST FULL',
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'exchange_full',
'asset_name',
),
)
def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
        Starting after the first end date, end each asset every
        `frequency` * `periods_between_ends`.
    auto_close_delta : pd.Timedelta or offset
        Delta from each asset's end_date used to compute its auto_close_date.
        Pass None to disable setting the auto_close_date column.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
'exchange_full': 'TEST FULL',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame
def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None):
"""
    Create a DataFrame representing futures for `root_symbols` during `years`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
start_date_func : (Timestamp) -> Timestamp, optional
Function to generate start dates from first of the month associated
with each asset month code. Defaults to a start_date one year prior
to the month_code date.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CME_CODE_TO_MONTH
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CME_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01)
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
iteritems(month_codes),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
            'expiration_date': expiration_date_func(month_begin),
'multiplier': 500,
'exchange': "TEST",
'exchange_full': 'TEST FULL',
})
return pd.DataFrame.from_records(contracts, index='sid')
def make_commodity_future_info(first_sid,
root_symbols,
years,
month_codes=None):
"""
Make futures testing data that simulates the notice/expiration date
behavior of physical commodities like oil.
Parameters
----------
first_sid : int
root_symbols : list[str]
years : list[int]
month_codes : dict[str -> int]
Expiration dates are on the 20th of the month prior to the month code.
        Notice dates are on the 20th two months prior to the month code.
Start dates are one year before the contract month.
See Also
--------
make_future_info
"""
nineteen_days = pd.Timedelta(days=19)
one_year = pd.Timedelta(days=365)
return make_future_info(
first_sid=first_sid,
root_symbols=root_symbols,
years=years,
notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days,
expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days,
start_date_func=lambda dt: dt - one_year,
month_codes=month_codes,
)
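# Worked example of the date rules above (illustrative only): for the 'K06'
# contract (May 2006), month_begin is 2006-05-01, so the expiration date is
# 2006-05-01 - MonthBegin(1) + 19 days = 2006-04-20, the notice date is
# 2006-05-01 - MonthBegin(2) + 19 days = 2006-03-20, and the start date is
# 2006-05-01 - 365 days = 2005-05-01.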
|
apache-2.0
|
alexvmarch/atomic
|
exatomic/core/molecule.py
|
3
|
6451
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Molecule Table
###################
"""
import numpy as np
import pandas as pd
import networkx as nx
import warnings
from networkx.algorithms.components import connected_components
from exa import DataFrame
from exatomic.base import sym2mass
from exatomic.formula import string_to_dict, dict_to_string
class Molecule(DataFrame):
"""
Description of molecules in the atomic universe.
"""
_index = 'molecule'
_categories = {'frame': np.int64, 'formula': str, 'classification': object}
#@property
#def _constructor(self):
# return Molecule
def classify(self, *classifiers):
"""
Classify molecules into arbitrary categories.
.. code-block:: Python
u.molecule.classify(('solute', 'Na'), ('solvent', 'H(2)O(1)'))
Args:
classifiers: Any number of tuples of the form ('label', 'identifier', exact) (see below)
Note:
A classifier has 3 parts, "label", e.g. "solvent", "identifier", e.g.
"H(2)O(1)", and exact (true or false). If exact is false (default),
classification is greedy and (in this example) molecules with formulas
"H(1)O(1)", "H(3)O(1)", etc. would get classified as "solvent". If,
instead, exact were set to true, those molecules would remain
unclassified.
Warning:
Classifiers are applied in the order passed; where identifiers overlap,
the latter classification is used.
See Also:
:func:`~exatomic.algorithms.nearest.compute_nearest_molecules`
"""
for c in classifiers:
n = len(c)
if n != 3 and n != 2:
raise ClassificationError()
self['classification'] = None
for classifier in classifiers:
identifier = string_to_dict(classifier[0])
classification = classifier[1]
exact = classifier[2] if len(classifier) == 3 else False
this = self
for symbol, count in identifier.items():
this = this[this[symbol] == count] if exact else this[this[symbol] >= 1]
if len(this) > 0:
self.ix[self.index.isin(this.index), 'classification'] = classification
else:
raise KeyError('No records found for {}, with identifier {}.'.format(classification, identifier))
self['classification'] = self['classification'].astype('category')
if len(self[self['classification'].isnull()]) > 0:
warnings.warn("Unclassified molecules remaining...")
def get_atom_count(self):
"""
Compute the number of atoms per molecule.
"""
symbols = self._get_symbols()
return self[symbols].sum(axis=1)
def get_formula(self, as_map=False):
"""
Compute the string representation of the molecule.
"""
symbols = self._get_symbols()
mcules = self[symbols].to_dict(orient='index')
ret = map(dict_to_string, mcules.values())
if as_map:
return ret
return list(ret)
def _get_symbols(self):
"""
Helper method to get atom symbols.
"""
return [col for col in self if len(col) < 3 and col[0].istitle()]
def compute_molecule(universe):
"""
Cluster atoms into molecules and create the :class:`~exatomic.molecule.Molecule`
table.
Args:
universe: Atomic universe
Returns:
molecule: Molecule table
Warning:
This function modifies the universe's atom (:class:`~exatomic.atom.Atom`)
table in place!
"""
nodes = universe.atom.index.values
bonded = universe.atom_two.ix[universe.atom_two['bond'] == True, ['atom0', 'atom1']]
edges = zip(bonded['atom0'].astype(np.int64), bonded['atom1'].astype(np.int64))
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
# generate molecule indices for the atom table
mapper = {}
i = 0
for k, v in g.degree(): # First handle single atom "molecules"
if v == 0:
mapper[k] = i
i += 1
for seht in connected_components(g): # Second handle multi atom molecules
for adx in seht:
mapper[adx] = i
i += 1
universe.atom['molecule'] = universe.atom.index.map(lambda x: mapper[x])
universe.atom['mass'] = universe.atom['symbol'].map(sym2mass)
grps = universe.atom.groupby('molecule')
molecule = grps['symbol'].value_counts().unstack().fillna(0).astype(np.int64)
molecule.columns.name = None
molecule['mass'] = grps['mass'].sum()
universe.atom['molecule'] = universe.atom['molecule'].astype('category')
del universe.atom['mass']
return molecule
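# Worked example of the clustering above (illustrative only, assuming a
# networkx version whose connected_components also yields isolated atoms as
# singleton sets): with atoms {0, 1, 2, 3} and bonds (0, 1) and (1, 2), the
# first loop gives the isolated atom 3 its own molecule index, and the second
# loop groups atoms {0, 1, 2} into one molecule and atom 3 into another, so
# every atom ends up grouped with its bonded neighbours.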
def compute_molecule_count(universe):
"""
"""
if 'molecule' not in universe.atom.columns:
universe.compute_molecule()
universe.atom._revert_categories()
mapper = universe.atom.drop_duplicates('molecule').set_index('molecule')['frame']
universe.atom._set_categories()
universe.molecule['frame'] = universe.molecule.index.map(lambda x: mapper[x])
molecule_count = universe.molecule.groupby('frame').size()
del universe.molecule['frame']
return molecule_count
def compute_molecule_com(universe):
"""
Compute molecules' centers of mass.
"""
if 'molecule' not in universe.atom.columns:
universe.compute_molecule()
mass = universe.atom.get_element_masses()
if universe.frame.is_periodic():
xyz = universe.atom[['x', 'y', 'z']].copy()
xyz.update(universe.visual_atom)
else:
xyz = universe.atom[['x', 'y', 'z']]
xm = xyz['x'].mul(mass)
ym = xyz['y'].mul(mass)
zm = xyz['z'].mul(mass)
#rm = xm.add(ym).add(zm)
df = pd.DataFrame.from_dict({'xm': xm, 'ym': ym, 'zm': zm, 'mass': mass,
'molecule': universe.atom['molecule']})
groups = df.groupby('molecule')
sums = groups.sum()
cx = sums['xm'].div(sums['mass'])
cy = sums['ym'].div(sums['mass'])
cz = sums['zm'].div(sums['mass'])
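    # The grouped sums above implement the usual mass-weighted mean, e.g. for
    # the x component of each molecule: COM_x = sum_i(m_i * x_i) / sum_i(m_i).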
return cx, cy, cz
|
apache-2.0
|
dhruve/spark
|
python/pyspark/sql/session.py
|
14
|
25557
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param samplingRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
        Returns a new SparkSession as a new session that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
seancug/python-example
|
fatiando-0.2/fatiando/gridder.py
|
1
|
12677
|
"""
Create and operate on grids and profiles.
**Grid generation**
* :func:`~fatiando.gridder.regular`
* :func:`~fatiando.gridder.scatter`
**Grid operations**
* :func:`~fatiando.gridder.cut`
* :func:`~fatiando.gridder.profile`
**Interpolation**
* :func:`~fatiando.gridder.interp`
* :func:`~fatiando.gridder.interp_at`
* :func:`~fatiando.gridder.extrapolate_nans`
**Input/Output**
* :func:`~fatiando.gridder.load_surfer`: Read a Surfer grid file and return
three 1d numpy arrays and the grid shape
**Misc**
* :func:`~fatiando.gridder.spacing`
----
"""
import numpy
import scipy.interpolate
import matplotlib.mlab
def load_surfer(fname, fmt='ascii'):
"""
Read a Surfer grid file and return three 1d numpy arrays and the grid shape
Surfer is a contouring, gridding and surface mapping software
from GoldenSoftware. The names and logos for Surfer and Golden
Software are registered trademarks of Golden Software, Inc.
http://www.goldensoftware.com/products/surfer
According to Surfer structure, x and y are horizontal and vertical
screen-based coordinates respectively. If the grid is in geographic
coordinates, x will be longitude and y latitude. If the coordinates
    are Cartesian, x will be the easting and y the northing coordinates.
WARNING: This is opposite to the convention used for Fatiando.
See io_surfer.py in cookbook.
Parameters:
* fname : str
Name of the Surfer grid file
* fmt : str
File type, can be 'ascii' or 'binary'
Returns:
* x : 1d-array
Value of the horizontal coordinate of each grid point.
* y : 1d-array
Value of the vertical coordinate of each grid point.
* grd : 1d-array
Values of the field in each grid point. Field can be for example
topography, gravity anomaly etc
* shape : tuple = (ny, nx)
The number of points in the vertical and horizontal grid dimensions,
respectively
"""
assert fmt in ['ascii', 'binary'], "Invalid grid format '%s'. Should be \
'ascii' or 'binary'." % (fmt)
if fmt == 'ascii':
# Surfer ASCII grid structure
# DSAA Surfer ASCII GRD ID
# nCols nRows number of columns and rows
# xMin xMax X min max
# yMin yMax Y min max
# zMin zMax Z min max
# z11 z21 z31 ... List of Z values
with open(fname) as ftext:
# DSAA is a Surfer ASCII GRD ID
id = ftext.readline()
# Read the number of columns (nx) and rows (ny)
nx, ny = [int(s) for s in ftext.readline().split()]
            # Read the min/max value of x (columns/longitude)
xmin, xmax = [float(s) for s in ftext.readline().split()]
# Read the min/max value of y(rows/latitude)
ymin, ymax = [float(s) for s in ftext.readline().split()]
# Read the min/max value of grd
zmin, zmax = [float(s) for s in ftext.readline().split()]
data = numpy.fromiter((float(i) for line in ftext for i in
line.split()), dtype='f')
grd = numpy.ma.masked_greater_equal(data, 1.70141e+38)
# Create x and y numpy arrays
x = numpy.linspace(xmin, xmax, nx)
y = numpy.linspace(ymin, ymax, ny)
x, y = [tmp.ravel() for tmp in numpy.meshgrid(x, y)]
if fmt == 'binary':
raise NotImplementedError(
"Binary file support is not implemented yet.")
    return x, y, grd, (ny, nx)
def regular(area, shape, z=None):
"""
Create a regular grid. Order of the output grid is x varies first, then y.
Parameters:
* area
``(x1, x2, y1, y2)``: Borders of the grid
* shape
Shape of the regular grid, ie ``(ny, nx)``.
* z
Optional. z coordinate of the grid points. If given, will return an
array with the value *z*.
Returns:
* ``[xcoords, ycoords]``
Numpy arrays with the x and y coordinates of the grid points
* ``[xcoords, ycoords, zcoords]``
If *z* given. Numpy arrays with the x, y, and z coordinates of the grid
points
"""
ny, nx = shape
x1, x2, y1, y2 = area
dy, dx = spacing(area, shape)
x_range = numpy.arange(x1, x2, dx)
y_range = numpy.arange(y1, y2, dy)
# Need to make sure that the number of points in the grid is correct because
# of rounding errors in arange. Sometimes x2 and y2 are included, sometimes
# not
if len(x_range) < nx:
x_range = numpy.append(x_range, x2)
if len(y_range) < ny:
y_range = numpy.append(y_range, y2)
assert len(x_range) == nx, "Failed! x_range doesn't have nx points"
assert len(y_range) == ny, "Failed! y_range doesn't have ny points"
xcoords, ycoords = [mat.ravel() for mat in numpy.meshgrid(x_range, y_range)]
if z is not None:
zcoords = z*numpy.ones_like(xcoords)
return [xcoords, ycoords, zcoords]
else:
return [xcoords, ycoords]
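# Worked example (illustrative only): regular((0, 10, 0, 20), (3, 2)) has
# spacing dx = dy = 10 and returns
#   xcoords = [0, 10, 0, 10, 0, 10]
#   ycoords = [0, 0, 10, 10, 20, 20]
# i.e. x varies first, then y, as stated above.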
def scatter(area, n, z=None, seed=None):
"""
Create an irregular grid with a random scattering of points.
Parameters:
* area
``(x1, x2, y1, y2)``: Borders of the grid
* n
Number of points
* z
Optional. z coordinate of the points. If given, will return an
array with the value *z*.
* seed : None or int
Seed used to generate the pseudo-random numbers. If `None`, will use a
different seed every time. Use the same seed to generate the same
random points.
Returns:
* ``[xcoords, ycoords]``
Numpy arrays with the x and y coordinates of the points
* ``[xcoords, ycoords, zcoords]``
If *z* given. Arrays with the x, y, and z coordinates of the points
"""
x1, x2, y1, y2 = area
numpy.random.seed(seed)
xcoords = numpy.random.uniform(x1, x2, n)
ycoords = numpy.random.uniform(y1, y2, n)
numpy.random.seed()
if z is not None:
zcoords = z*numpy.ones(n)
return [xcoords, ycoords, zcoords]
else:
return [xcoords, ycoords]
def spacing(area, shape):
"""
Returns the spacing between grid nodes
Parameters:
* area
``(x1, x2, y1, y2)``: Borders of the grid
* shape
Shape of the regular grid, ie ``(ny, nx)``.
Returns:
* ``[dy, dx]``
Spacing the y and x directions
"""
x1, x2, y1, y2 = area
ny, nx = shape
dx = float(x2 - x1)/float(nx - 1)
dy = float(y2 - y1)/float(ny - 1)
return [dy, dx]
def interp(x, y, v, shape, area=None, algorithm='cubic', extrapolate=False):
"""
Interpolate data onto a regular grid.
Parameters:
* x, y : 1D arrays
Arrays with the x and y coordinates of the data points.
* v : 1D array
Array with the scalar value assigned to the data points.
* shape : tuple = (ny, nx)
Shape of the interpolated regular grid, ie (ny, nx).
* area : tuple = (x1, x2, y1, y2)
        The area where the data will be interpolated. If None, then will get the
area from *x* and *y*.
* algorithm : string
Interpolation algorithm. Either ``'cubic'``, ``'nearest'``,
``'linear'`` (see scipy.interpolate.griddata), or ``'nn'`` for nearest
neighbors (using matplotlib.mlab.griddata)
* extrapolate : True or False
If True, will extrapolate values outside of the convex hull of the data
points.
Returns:
* ``[x, y, v]``
Three 1D arrays with the interpolated x, y, and v
"""
if algorithm not in ['cubic', 'linear', 'nearest', 'nn']:
raise ValueError("Invalid interpolation algorithm: " + str(algorithm))
ny, nx = shape
if area is None:
area = (x.min(), x.max(), y.min(), y.max())
x1, x2, y1, y2 = area
xs = numpy.linspace(x1, x2, nx)
ys = numpy.linspace(y1, y2, ny)
xp, yp = [i.ravel() for i in numpy.meshgrid(xs, ys)]
if algorithm == 'nn':
grid = matplotlib.mlab.griddata(x, y, v, numpy.reshape(xp, shape),
numpy.reshape(yp, shape), interp='nn').ravel()
if extrapolate and numpy.ma.is_masked(grid):
grid = extrapolate_nans(xp, yp, grid)
else:
grid = interp_at(x, y, v, xp, yp, algorithm=algorithm,
extrapolate=extrapolate)
return [xp, yp, grid]
def interp_at(x, y, v, xp, yp, algorithm='cubic', extrapolate=False):
"""
Interpolate data onto the specified points.
Parameters:
* x, y : 1D arrays
Arrays with the x and y coordinates of the data points.
* v : 1D array
Array with the scalar value assigned to the data points.
* xp, yp : 1D arrays
Points where the data values will be interpolated
* algorithm : string
Interpolation algorithm. Either ``'cubic'``, ``'nearest'``,
``'linear'`` (see scipy.interpolate.griddata)
* extrapolate : True or False
If True, will extrapolate values outside of the convex hull of the data
points.
Returns:
* v : 1D array
1D array with the interpolated v values.
"""
if algorithm not in ['cubic', 'linear', 'nearest']:
raise ValueError("Invalid interpolation algorithm: " + str(algorithm))
grid = scipy.interpolate.griddata((x, y), v, (xp, yp),
method=algorithm).ravel()
if extrapolate and algorithm != 'nearest' and numpy.any(numpy.isnan(grid)):
grid = extrapolate_nans(xp, yp, grid)
return grid
def profile(x, y, v, point1, point2, size, extrapolate=False):
"""
Extract a data profile between 2 points.
Uses interpolation to calculate the data values at the profile points.
Parameters:
* x, y : 1D arrays
Arrays with the x and y coordinates of the data points.
* v : 1D array
Array with the scalar value assigned to the data points.
* point1, point2 : lists = [x, y]
Lists the x, y coordinates of the 2 points between which the profile
will be extracted.
* size : int
Number of points along the profile.
* extrapolate : True or False
If True, will extrapolate values outside of the convex hull of the data
points.
Returns:
* [xp, yp, distances, vp] : 1d arrays
``xp`` and ``yp`` are the x, y coordinates of the points along the
profile.
``distances`` are the distances of the profile points to ``point1``
``vp`` are the data points along the profile.
"""
x1, y1 = point1
x2, y2 = point2
maxdist = numpy.sqrt((x1 - x2)**2 + (y1 - y2)**2)
distances = numpy.linspace(0, maxdist, size)
angle = numpy.arctan2(y2 - y1, x2 - x1)
xp = x1 + distances*numpy.cos(angle)
yp = y1 + distances*numpy.sin(angle)
vp = interp_at(x, y, v, xp, yp, algorithm='cubic', extrapolate=extrapolate)
return xp, yp, distances, vp
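# Worked example (illustrative only): with point1 = (0, 0), point2 = (3, 4)
# and size = 6, maxdist = 5, so the profile points are spaced 1 unit apart
# along the line at angle arctan2(4, 3) from the x axis.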
def extrapolate_nans(x, y, v):
""""
Extrapolate the NaNs or masked values in a grid INPLACE using nearest
value.
.. warning:: Replaces the NaN or masked values of the original array!
Parameters:
* x, y : 1D arrays
Arrays with the x and y coordinates of the data points.
* v : 1D array
Array with the scalar value assigned to the data points.
Returns:
* v : 1D array
The array with NaNs or masked values extrapolated.
"""
if numpy.ma.is_masked(v):
nans = v.mask
else:
nans = numpy.isnan(v)
notnans = numpy.logical_not(nans)
v[nans] = scipy.interpolate.griddata((x[notnans], y[notnans]), v[notnans],
(x[nans], y[nans]), method='nearest').ravel()
return v
def cut(x, y, scalars, area):
"""
Return a subsection of a grid.
The returned subsection is not a copy! In technical terms, returns a slice
of the numpy arrays. So changes made to the subsection reflect on the
original grid. Use numpy.copy to make copies of the subsections and avoid
this.
Parameters:
* x, y
Arrays with the x and y coordinates of the data points.
* scalars
List of arrays with the scalar values assigned to the grid points.
* area
``(x1, x2, y1, y2)``: Borders of the subsection
Returns:
* ``[subx, suby, subscalars]``
Arrays with x and y coordinates and scalar values of the subsection.
"""
xmin, xmax, ymin, ymax = area
if len(x) != len(y):
raise ValueError("x and y must have the same length")
inside = [i for i in xrange(len(x))
if x[i] >= xmin and x[i] <= xmax and y[i] >= ymin and y[i] <= ymax]
return [x[inside], y[inside], [s[inside] for s in scalars]]
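# Worked example (illustrative only): with x = y = numpy.array([0., 1., 2., 3.])
# and area = (1, 2, 0, 3), only indices 1 and 2 satisfy both bounds, so the
# returned subsection holds the points (1, 1) and (2, 2).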
|
gpl-2.0
|
yyjiang/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
120
|
10082
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
|
bsd-3-clause
|
mikecroucher/GPy
|
GPy/testing/plotting_tests.py
|
3
|
21811
|
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
#===============================================================================
# SKIPPING PLOTTING BECAUSE IT BEHAVES DIFFERENTLY ON DIFFERENT
# SYSTEMS, AND WILL MISBEHAVE
from nose import SkipTest
#raise SkipTest("Skipping Matplotlib testing")
#===============================================================================
try:
import matplotlib
matplotlib.use('agg')
except ImportError:
# matplotlib not installed
from nose import SkipTest
raise SkipTest("Skipping Matplotlib testing")
from unittest.case import TestCase
import numpy as np
import GPy, os
from GPy.util.config import config
from GPy.plotting import change_plotting_library, plotting_library
class ConfigTest(TestCase):
def tearDown(self):
change_plotting_library('matplotlib')
def test_change_plotting(self):
self.assertRaises(ValueError, change_plotting_library, 'not+in9names')
change_plotting_library('none')
self.assertRaises(RuntimeError, plotting_library)
change_plotting_library('matplotlib')
if config.get('plotting', 'library') != 'matplotlib':
raise SkipTest("Matplotlib not installed, not testing plots")
try:
from matplotlib import cbook, pyplot as plt
from matplotlib.testing.compare import compare_images
from matplotlib.testing.noseclasses import ImageComparisonFailure
except ImportError:
raise SkipTest("Matplotlib not installed, not testing plots")
extensions = ['npz']
basedir = os.path.dirname(os.path.relpath(os.path.abspath(__file__)))
def _image_directories():
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
#module_name = __init__.__module__
#mods = module_name.split('.')
#basedir = os.path.join(*mods)
result_dir = os.path.join(basedir, 'testresult','.')
baseline_dir = os.path.join(basedir, 'baseline','.')
if not os.path.exists(result_dir):
cbook.mkdirs(result_dir)
return baseline_dir, result_dir
baseline_dir, result_dir = _image_directories()
if not os.path.exists(baseline_dir):
raise SkipTest("Not installed from source, baseline not available. Install from source to test plotting")
def _image_comparison(baseline_images, extensions=['pdf','svg','png'], tol=11, rtol=1e-3, **kwargs):
for num, base in zip(plt.get_fignums(), baseline_images):
for ext in extensions:
fig = plt.figure(num)
fig.canvas.draw()
#fig.axes[0].set_axis_off()
#fig.set_frameon(False)
if ext in ['npz']:
figdict = flatten_axis(fig)
np.savez_compressed(os.path.join(result_dir, "{}.{}".format(base, ext)), **figdict)
fig.savefig(os.path.join(result_dir, "{}.{}".format(base, 'png')),
transparent=True,
edgecolor='none',
facecolor='none',
#bbox='tight'
)
else:
fig.savefig(os.path.join(result_dir, "{}.{}".format(base, ext)),
transparent=True,
edgecolor='none',
facecolor='none',
#bbox='tight'
)
for num, base in zip(plt.get_fignums(), baseline_images):
for ext in extensions:
#plt.close(num)
actual = os.path.join(result_dir, "{}.{}".format(base, ext))
expected = os.path.join(baseline_dir, "{}.{}".format(base, ext))
if ext == 'npz':
def do_test():
if not os.path.exists(expected):
import shutil
shutil.copy2(actual, expected)
#shutil.copy2(os.path.join(result_dir, "{}.{}".format(base, 'png')), os.path.join(baseline_dir, "{}.{}".format(base, 'png')))
raise IOError("Baseline file {} not found, copying result {}".format(expected, actual))
else:
exp_dict = dict(np.load(expected).items())
act_dict = dict(np.load(actual).items())
for name in act_dict:
if name in exp_dict:
try:
np.testing.assert_allclose(exp_dict[name], act_dict[name], err_msg="Mismatch in {}.{}".format(base, name), rtol=rtol, **kwargs)
except AssertionError as e:
raise SkipTest(e)
else:
def do_test():
err = compare_images(expected, actual, tol, in_decorator=True)
if err:
                        raise SkipTest("Error between {} and {} is {:.5f}, which is bigger than the tolerance of {:.5f}".format(actual, expected, err['rms'], tol))
yield do_test
plt.close('all')
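# Recursively collect every numpy array attached to a matplotlib artist into a
# flat {name: array} dict, so a figure can be stored and compared as an .npz file.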
def flatten_axis(ax, prevname=''):
import inspect
members = inspect.getmembers(ax)
arrays = {}
def _flatten(l, pre):
arr = {}
if isinstance(l, np.ndarray):
if l.size:
arr[pre] = np.asarray(l)
elif isinstance(l, dict):
for _n in l:
_tmp = _flatten(l, pre+"."+_n+".")
for _nt in _tmp.keys():
arrays[_nt] = _tmp[_nt]
elif isinstance(l, list) and len(l)>0:
for i in range(len(l)):
_tmp = _flatten(l[i], pre+"[{}]".format(i))
for _n in _tmp:
arr["{}".format(_n)] = _tmp[_n]
else:
return flatten_axis(l, pre+'.')
return arr
for name, l in members:
if isinstance(l, np.ndarray):
arrays[prevname+name] = np.asarray(l)
elif isinstance(l, list) and len(l)>0:
for i in range(len(l)):
_tmp = _flatten(l[i], prevname+name+"[{}]".format(i))
for _n in _tmp:
arrays["{}".format(_n)] = _tmp[_n]
return arrays
def _a(x,y,decimal):
np.testing.assert_array_almost_equal(x, y, decimal)
def compare_axis_dicts(x, y, decimal=6):
try:
assert(len(x)==len(y))
for name in x:
_a(x[name], y[name], decimal)
except AssertionError as e:
        raise SkipTest(str(e))
def test_figure():
np.random.seed(1239847)
from GPy.plotting import plotting_library as pl
#import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax, _ = pl().new_canvas(num="imshow_interact")
def test_func(x):
return x[:, 0].reshape(3,3)
pl().imshow_interact(ax, test_func, extent=(-1,1,-1,1), resolution=3)
ax, _ = pl().new_canvas()
def test_func_2(x):
y = x[:, 0].reshape(3,3)
anno = np.argmax(x, axis=1).reshape(3,3)
return y, anno
pl().annotation_heatmap_interact(ax, test_func_2, extent=(-1,1,-1,1), resolution=3)
pl().annotation_heatmap_interact(ax, test_func_2, extent=(-1,1,-1,1), resolution=3, imshow_kwargs=dict(interpolation='nearest'))
ax, _ = pl().new_canvas(figsize=(4,3))
x = np.linspace(0,1,100)
y = [0,1,2]
array = np.array([.4,.5])
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('WhToColor', ('r', 'b'), N=array.size)
pl().fill_gradient(ax, x, y, facecolors=['r', 'g'], array=array, cmap=cmap)
        ax, _ = pl().new_canvas(num="3d_plot", figsize=(4,3), projection='3d', xlabel='x', ylabel='y', zlabel='z', title='awesome title', xlim=(-1,1), ylim=(-1,1), zlim=(-3,3))
z = 2-np.abs(np.linspace(-2,2,(100)))+1
x, y = z*np.sin(np.linspace(-2*np.pi,2*np.pi,(100))), z*np.cos(np.linspace(-np.pi,np.pi,(100)))
pl().plot(ax, x, y, z, linewidth=2)
for do_test in _image_comparison(
baseline_images=['coverage_{}'.format(sub) for sub in ["imshow_interact",'annotation_interact','gradient','3d_plot',]],
extensions=extensions):
yield (do_test, )
def test_kernel():
np.random.seed(1239847)
#import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
k = GPy.kern.RBF(5, ARD=True) * GPy.kern.Linear(3, active_dims=[0,2,4], ARD=True) + GPy.kern.Bias(2)
k.randomize()
k2 = GPy.kern.RBF(5, ARD=True) * GPy.kern.Linear(3, active_dims=[0,2,4], ARD=True) + GPy.kern.Bias(2) + GPy.kern.White(4)
k2[:-1] = k[:]
k2.plot_ARD(['rbf', 'linear', 'bias'], legend=True)
k2.plot_covariance(visible_dims=[0, 3], plot_limits=(-1,3))
k2.plot_covariance(visible_dims=[2], plot_limits=(-1, 3))
k2.plot_covariance(visible_dims=[2, 4], plot_limits=((-1, 0), (5, 3)), projection='3d', rstride=10, cstride=10)
k2.plot_covariance(visible_dims=[1, 4])
for do_test in _image_comparison(
baseline_images=['kern_{}'.format(sub) for sub in ["ARD", 'cov_2d', 'cov_1d', 'cov_3d', 'cov_no_lim']],
extensions=extensions):
yield (do_test, )
def test_plot():
np.random.seed(111)
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = np.random.uniform(-2, 2, (40, 1))
f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
Y = f+np.random.normal(0, .1, f.shape)
m = GPy.models.SparseGPRegression(X, Y, X_variance=np.ones_like(X)*[0.06])
#m.optimize()
m.plot_data()
m.plot_mean()
m.plot_confidence()
m.plot_density()
m.plot_errorbars_trainset()
m.plot_samples()
m.plot_data_error()
for do_test in _image_comparison(baseline_images=['gp_{}'.format(sub) for sub in ["data", "mean", 'conf',
'density',
'out_error',
'samples', 'in_error']], extensions=extensions):
yield (do_test, )
def test_twod():
np.random.seed(11111)
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
X = np.random.uniform(-2, 2, (40, 2))
f = .2 * np.sin(1.3*X[:,[0]]) + 1.3*np.cos(2*X[:,[1]])
Y = f+np.random.normal(0, .1, f.shape)
m = GPy.models.SparseGPRegression(X, Y, X_variance=np.ones_like(X)*[0.01, 0.2])
#m.optimize()
m.plot_data()
m.plot_mean()
m.plot_inducing(legend=False, marker='s')
#m.plot_errorbars_trainset()
m.plot_data_error()
for do_test in _image_comparison(baseline_images=['gp_2d_{}'.format(sub) for sub in ["data", "mean",
'inducing',
#'out_error',
'in_error',
]], extensions=extensions):
yield (do_test, )
def test_threed():
np.random.seed(11111)
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
X = np.random.uniform(-2, 2, (40, 2))
f = .2 * np.sin(1.3*X[:,[0]]) + 1.3*np.cos(2*X[:,[1]])
Y = f+np.random.normal(0, .1, f.shape)
m = GPy.models.SparseGPRegression(X, Y)
m.likelihood.variance = .1
#m.optimize()
m.plot_samples(projection='3d', samples=1)
m.plot_samples(projection='3d', plot_raw=False, samples=1)
plt.close('all')
m.plot_data(projection='3d')
m.plot_mean(projection='3d', rstride=10, cstride=10)
m.plot_inducing(projection='3d')
#m.plot_errorbars_trainset(projection='3d')
for do_test in _image_comparison(baseline_images=['gp_3d_{}'.format(sub) for sub in ["data", "mean", 'inducing',
#'error',
#"samples", "samples_lik"
]], extensions=extensions):
yield (do_test, )
def test_sparse():
np.random.seed(11111)
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
X = np.random.uniform(-2, 2, (40, 1))
f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
Y = f+np.random.normal(0, .1, f.shape)
m = GPy.models.SparseGPRegression(X, Y, X_variance=np.ones_like(X)*0.1)
#m.optimize()
#m.plot_inducing()
_, ax = plt.subplots()
m.plot_data(ax=ax)
m.plot_data_error(ax=ax)
for do_test in _image_comparison(baseline_images=['sparse_gp_{}'.format(sub) for sub in ['data_error']], extensions=extensions):
yield (do_test, )
def test_classification():
np.random.seed(11111)
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
X = np.random.uniform(-2, 2, (40, 1))
f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
Y = f+np.random.normal(0, .1, f.shape)
m = GPy.models.GPClassification(X, Y>Y.mean())
#m.optimize()
_, ax = plt.subplots()
m.plot(plot_raw=False, apply_link=False, ax=ax)
m.plot_errorbars_trainset(plot_raw=False, apply_link=False, ax=ax)
_, ax = plt.subplots()
m.plot(plot_raw=True, apply_link=False, ax=ax)
m.plot_errorbars_trainset(plot_raw=True, apply_link=False, ax=ax)
_, ax = plt.subplots()
m.plot(plot_raw=True, apply_link=True, ax=ax)
m.plot_errorbars_trainset(plot_raw=True, apply_link=True, ax=ax)
for do_test in _image_comparison(baseline_images=['gp_class_{}'.format(sub) for sub in ["likelihood", "raw", 'raw_link']], extensions=extensions):
yield (do_test, )
def test_sparse_classification():
np.random.seed(11111)
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
X = np.random.uniform(-2, 2, (40, 1))
f = .2 * np.sin(1.3*X) + 1.3*np.cos(2*X)
Y = f+np.random.normal(0, .1, f.shape)
m = GPy.models.SparseGPClassification(X, Y>Y.mean())
#m.optimize()
m.plot(plot_raw=False, apply_link=False, samples_likelihood=3)
np.random.seed(111)
m.plot(plot_raw=True, apply_link=False, samples=3)
np.random.seed(111)
m.plot(plot_raw=True, apply_link=True, samples=3)
for do_test in _image_comparison(baseline_images=['sparse_gp_class_{}'.format(sub) for sub in ["likelihood", "raw", 'raw_link']], extensions=extensions, rtol=2):
yield (do_test, )
def test_gplvm():
from GPy.models import GPLVM
np.random.seed(12345)
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
#Q = 3
# Define dataset
#N = 60
#k1 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,10,10,0.1,0.1]), ARD=True)
#k2 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,0.1,10,0.1,10]), ARD=True)
#k3 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[0.1,0.1,10,10,10]), ARD=True)
#X = np.random.normal(0, 1, (N, 5))
#A = np.random.multivariate_normal(np.zeros(N), k1.K(X), Q).T
#B = np.random.multivariate_normal(np.zeros(N), k2.K(X), Q).T
#C = np.random.multivariate_normal(np.zeros(N), k3.K(X), Q).T
#Y = np.vstack((A,B,C))
#labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]), np.ones(C.shape[0])*2))
#k = RBF(Q, ARD=True, lengthscale=2) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
pars = np.load(os.path.join(basedir, 'b-gplvm-save.npz'))
Y = pars['Y']
Q = pars['Q']
labels = pars['labels']
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always') # always print
m = GPLVM(Y, Q, initialize=False)
m.update_model(False)
m.initialize_parameter()
m[:] = pars['gplvm_p']
m.update_model(True)
#m.optimize(messages=0)
np.random.seed(111)
m.plot_latent(labels=labels)
np.random.seed(111)
m.plot_scatter(projection='3d', labels=labels)
np.random.seed(111)
m.plot_magnification(labels=labels)
m.plot_steepest_gradient_map(resolution=10, data_labels=labels)
for do_test in _image_comparison(baseline_images=['gplvm_{}'.format(sub) for sub in ["latent", "latent_3d", "magnification", 'gradient']],
extensions=extensions,
tol=12):
yield (do_test, )
def test_bayesian_gplvm():
from ..models import BayesianGPLVM
np.random.seed(12345)
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
#matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
#Q = 3
# Define dataset
#N = 10
#k1 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,10,10,0.1,0.1]), ARD=True)
#k2 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[10,0.1,10,0.1,10]), ARD=True)
#k3 = GPy.kern.RBF(5, variance=1, lengthscale=1./np.random.dirichlet(np.r_[0.1,0.1,10,10,10]), ARD=True)
#X = np.random.normal(0, 1, (N, 5))
#A = np.random.multivariate_normal(np.zeros(N), k1.K(X), Q).T
#B = np.random.multivariate_normal(np.zeros(N), k2.K(X), Q).T
#C = np.random.multivariate_normal(np.zeros(N), k3.K(X), Q).T
#Y = np.vstack((A,B,C))
#labels = np.hstack((np.zeros(A.shape[0]), np.ones(B.shape[0]), np.ones(C.shape[0])*2))
#k = RBF(Q, ARD=True, lengthscale=2) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
pars = np.load(os.path.join(basedir, 'b-gplvm-save.npz'))
Y = pars['Y']
Q = pars['Q']
labels = pars['labels']
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always') # always print
m = BayesianGPLVM(Y, Q, initialize=False)
m.update_model(False)
m.initialize_parameter()
m[:] = pars['bgplvm_p']
m.update_model(True)
#m.optimize(messages=0)
np.random.seed(111)
m.plot_inducing(projection='2d')
np.random.seed(111)
m.plot_inducing(projection='3d')
np.random.seed(111)
m.plot_latent(projection='2d', labels=labels)
np.random.seed(111)
m.plot_scatter(projection='3d', labels=labels)
np.random.seed(111)
m.plot_magnification(labels=labels)
np.random.seed(111)
m.plot_steepest_gradient_map(resolution=10, data_labels=labels)
for do_test in _image_comparison(baseline_images=['bayesian_gplvm_{}'.format(sub) for sub in ["inducing", "inducing_3d", "latent", "latent_3d", "magnification", 'gradient']], extensions=extensions):
yield (do_test, )
if __name__ == '__main__':
import nose
nose.main(defaultTest='./plotting_tests.py')
|
bsd-3-clause
|
rs2/bokeh
|
examples/models/file/colors.py
|
9
|
2059
|
from __future__ import print_function
from math import pi
import pandas as pd
from bokeh.models import (
Plot, ColumnDataSource, FactorRange, CategoricalAxis, TapTool, HoverTool, OpenURL, CategoricalScale)
from bokeh.models.glyphs import Rect
from bokeh.colors import groups
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
data = []
for name in groups.__all__:
group = getattr(groups, name)
data.extend([(str(x), x.to_hex(), group.__name__) for x in group])
css3_colors = pd.DataFrame(data, columns=["Name", "Color", "Group"])
source = ColumnDataSource(dict(
names = list(css3_colors.Name),
groups = list(css3_colors.Group),
colors = list(css3_colors.Color),
))
xdr = FactorRange(factors=list(css3_colors.Group.unique()))
ydr = FactorRange(factors=list(reversed(css3_colors.Name)))
x_scale, y_scale = CategoricalScale(), CategoricalScale()
plot = Plot(x_range=xdr, y_range=ydr, x_scale=x_scale, y_scale=y_scale, plot_width=600, plot_height=2000)
plot.title.text = "CSS3 Color Names"
rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None)
rect_renderer = plot.add_glyph(source, rect)
xaxis_above = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_above, 'above')
xaxis_below = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_below, 'below')
plot.add_layout(CategoricalAxis(), 'left')
url = "http://www.colors.commutercreative.com/@names/"
tooltips = """Click the color to go to:<br /><a href="{url}">{url}</a>""".format(url=url)
tap = TapTool(renderers=[rect_renderer], callback=OpenURL(url=url))
hover = HoverTool(renderers=[rect_renderer], tooltips=tooltips)
plot.tools.extend([tap, hover])
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "colors.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "CSS3 Color Names"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
|
rellermeyer/99tsp
|
python/neural/run.py
|
1
|
3885
|
from elastic import ElasticNet
from scipy.spatial.distance import pdist
import itertools
import numpy as np
import optparse
import os
import sys
import matplotlib.pyplot as plt
import seaborn as sns
def main():
options, args = parse_arguments()
# load in parameters
cities = load_instance(args[0])
n_iters = options.n_iters
neuron_factor = options.neuron_factor
alpha = options.alpha
beta = options.beta
radius = options.radius
plotting = options.plots
slides = options.slides
norm_cities = normalize_cities(cities)
elastic_net = ElasticNet(n_iters, neuron_factor, alpha, beta, radius, slides)
print()
print("Fitting Elastic Net having parameters: \
\n Iterations: {n_iters} \
\n neurons: {neurons} \
\n alpha: {alpha} \
\n beta: {beta} \
\n radius: {radius}".format(n_iters=n_iters, neurons=int(cities.shape[0]*neuron_factor),
alpha=alpha, beta=beta, radius=radius))
elastic_net.fit(norm_cities)
city_permutation = elastic_net.get_solution_permutation()
edges, tour_length = elastic_net.get_tour_length(city_permutation, cities)
print_solution(city_permutation, tour_length)
if plotting:
plot_solution(cities, edges)
def plot_solution(cities, edges):
fig, ax = plt.subplots()
ax.scatter(cities[:,0], cities[:,1])
for e in edges:
plt.plot([cities[e[0],0], cities[e[1],0]], [cities[e[0],1], cities[e[1],1]], c='green')
plt.show()
def print_solution(city_permutation, tour_length):
print()
print("---SOLUTION---")
print("Tour Length: %d" % tour_length)
def load_instance(path):
"""load instance of TSP from file"""
print("Loading Instance from %s..." % path.split("/")[-1])
cities = []
with open(path, 'r') as f:
line = ""
while line != "NODE_COORD_SECTION" and line != "DISPLAY_DATA_SECTION":
line = f.readline().strip()
for line in f:
line = line.strip().split()
if line[0] == "EOF" or line[0] == "TOUR_SECTION":
break
x, y = line[1], line[2]
cities.append((float(x), float(y)))
print("Finished Loading file")
return np.array(cities)
def normalize_cities(cities):
"""normalize cities to aid in convergence"""
min = np.min(cities, axis=0)
max = np.max(cities, axis=0)
return (cities - min) / (max - min)
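# Hedged usage sketch (illustrative only, never called by the script): build a
# tiny TSPLIB-style instance in a temporary file, load it, and normalize the
# coordinates to the unit square.
def _demo_load_and_normalize():
    import tempfile
    tsp_text = ("NAME: toy3\nTYPE: TSP\nDIMENSION: 3\nNODE_COORD_SECTION\n"
                "1 0.0 0.0\n2 3.0 4.0\n3 6.0 8.0\nEOF\n")
    with tempfile.NamedTemporaryFile('w', suffix='.tsp', delete=False) as f:
        f.write(tsp_text)
        path = f.name
    toy_cities = load_instance(path)     # numpy array of shape (3, 2)
    return normalize_cities(toy_cities)  # [[0, 0], [0.5, 0.5], [1, 1]]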
def parse_arguments():
parser = optparse.OptionParser("Usage: %prog <file.tsp> [options]")
parser.add_option("-a",
type = float,
dest = "alpha",
default = 0.4)
parser.add_option("-b",
type = float,
dest = "beta",
default = 2.0)
parser.add_option("-i",
type = int,
dest = "n_iters",
default = 30)
parser.add_option("-f",
type = float,
dest = "neuron_factor",
default = 2.5)
parser.add_option("-r",
type = float,
dest = "radius",
default = 0.1)
parser.add_option("-p", "--plot",
action = "store_true",
dest = "plots",
default = False,
help = "Enable Plotting")
parser.add_option("-s", "--slideshow",
action = "store_true",
dest = "slides",
default = False,
help = "Show Neurons Every Iteration")
options, args = parser.parse_args()
if len(args) != 1:
print("Must pass in a filename")
sys.exit(-1)
return options, args
if __name__ == '__main__':
main()
|
bsd-3-clause
|
dsm054/pandas
|
pandas/tests/reshape/test_union_categoricals.py
|
1
|
14833
|
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with pytest.raises(TypeError, match=msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with pytest.raises(ValueError, match=msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical(np.array([np.nan, np.nan],
dtype=object)),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([Categorical([]),
Categorical(['1'])])
exp = Categorical(['1'])
tm.assert_categorical_equal(res, exp)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
c1 = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c'])
c2 = Categorical(['a', 'b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2])
expected = Categorical(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with pytest.raises(TypeError, match=msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with pytest.raises(TypeError, match=msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with pytest.raises(TypeError, match=msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with pytest.raises(TypeError, match=msg):
union_categoricals([c1, c2], ignore_order=False)
with pytest.raises(TypeError, match=msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['x', 'b'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([np.nan, np.nan])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['b', 'a', 'a', 'c'],
categories=['b', 'a', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_union_categorical_unwrap(self):
# GH 14173
c1 = Categorical(['a', 'b'])
c2 = pd.Series(['b', 'c'], dtype='category')
result = union_categoricals([c1, c2])
expected = Categorical(['a', 'b', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c2 = CategoricalIndex(c2)
result = union_categoricals([c1, c2])
tm.assert_categorical_equal(result, expected)
c1 = Series(c1)
result = union_categoricals([c1, c2])
tm.assert_categorical_equal(result, expected)
with pytest.raises(TypeError):
union_categoricals([c1, ['a', 'b', 'c']])
|
bsd-3-clause
|
jeremymcrae/mupit
|
mupit/gtf.py
|
1
|
3111
|
"""
https://gist.github.com/slowkow/8101481
Kamil Slowikowski
December 24, 2013
Read GFF/GTF files. Works with gzip compressed files and pandas.
http://useast.ensembl.org/info/website/upload/gff.html
"""
from collections import defaultdict
import gzip
import re
import tempfile
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
import pandas
from mupit.util import is_url
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def convert_gtf(path):
"""Open an optionally gzipped GTF file and return a pandas.DataFrame.
"""
# Each column is a list stored as a value in this dict.
result = defaultdict(list)
for i, line in enumerate(lines(path)):
for key in line.keys():
# This key has not been seen yet, so set it to None for all
# previous lines.
if key not in result:
result[key] = [None] * i
# Ensure this row has some value for each column.
for key in result.keys():
result[key].append(line.get(key, None))
return pandas.DataFrame(result)
def lines(path):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if path.endswith('.gz') else open
if is_url(path):
# if the path refers to a URL, download the file first
temp = tempfile.NamedTemporaryFile()
urlretrieve(path, temp.name)
path = temp.name
    with fn_open(path, 'rb') as handle:
for line in handle:
line = line.decode('utf8')
if line.startswith('#'):
continue
elif line.split('\t', 3)[2] != 'gene':
continue
else:
yield parse(line)
def parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = [x for x in re.split(R_SEMICOLON, fields[8]) if x.strip()]
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info, 1)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value
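# Hedged usage sketch (illustrative only, not part of the original module): write
# a tiny gzipped GTF with a single gene record and read it back as a DataFrame.
def _demo_convert_gtf():
    import os
    tmp = tempfile.NamedTemporaryFile(suffix='.gtf.gz', delete=False)
    tmp.close()
    with gzip.open(tmp.name, 'wb') as handle:
        handle.write(b'# comment lines are skipped\n')
        handle.write(b'1\thavana\tgene\t11869\t14409\t.\t+\t.\t'
                     b'gene_id "ENSG00000223972"; gene_name "DDX11L1";\n')
    # Resulting columns include seqname, start, end, gene_id and gene_name.
    frame = convert_gtf(tmp.name)
    os.remove(tmp.name)
    return frame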
|
mit
|
equialgo/scikit-learn
|
examples/svm/plot_svm_anova.py
|
85
|
2024
|
"""
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
petewarden/tensorflow_makefile
|
tensorflow/contrib/learn/python/learn/io/__init__.py
|
5
|
1709
|
"""Tools to allow different io formats."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.io.pandas_io import HAS_PANDAS
# pylint: disable=g-import-not-at-top
if HAS_PANDAS:
from tensorflow.contrib.learn.python.learn.io.pandas_io import pd
|
apache-2.0
|
alexeyum/scikit-learn
|
sklearn/covariance/tests/test_covariance.py
|
79
|
12193
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
# A simple implementation of the formulas from Ledoit & Wolf
# The computation below achieves the following computations of the
# "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
# Large-Dimensional Covariance Matrices"
# beta and delta are given in the beginning of section 3.2
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=False)
mu = np.trace(emp_cov) / n_features
delta_ = emp_cov.copy()
delta_.flat[::n_features + 1] -= mu
delta = (delta_ ** 2).sum() / n_features
X2 = X ** 2
beta_ = 1. / (n_features * n_samples) \
* np.sum(np.dot(X2.T, X2) / n_samples - emp_cov ** 2)
beta = min(beta_, delta)
shrinkage = beta / delta
return shrinkage
def test_ledoit_wolf_small():
# Compare our blocked implementation to the naive implementation
X_small = X[:, :4]
lw = LedoitWolf()
lw.fit(X_small)
shrinkage_ = lw.shrinkage_
assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
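# Illustrative sketch, not one of the original tests: a naive transcription of
# the OAS shrinkage formula (Chen et al., 2010), which is what the oas() calls
# above are expected to compute.  The helper name is hypothetical.
def _naive_oas_shrinkage(X):
    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=False)
    mu = np.trace(emp_cov) / n_features
    alpha = np.mean(emp_cov ** 2)
    num = alpha + mu ** 2
    den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
    return 1. if den == 0 else min(num / den, 1.)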
|
bsd-3-clause
|
jmargeta/scikit-learn
|
examples/ensemble/plot_partial_dependence.py
|
4
|
4436
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception, the size of the target feature set must be small (usually
one or two); thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
pl.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = pl.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=pl.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
pl.colorbar(surf)
pl.suptitle('Partial dependence of house value on median age and '
'average occupancy')
pl.subplots_adjust(top=0.9)
pl.show()
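# Illustrative sketch, not part of the original example: one-way partial
# dependence can be approximated by clamping a single feature to each grid
# value and averaging the model's predictions over the training data.
# The helper name is hypothetical and is not called above.
def naive_partial_dependence(model, X, feature_idx, grid):
    averaged_predictions = []
    for value in grid:
        X_mod = X.copy()
        X_mod[:, feature_idx] = value  # clamp the target feature
        averaged_predictions.append(model.predict(X_mod).mean())
    return np.array(averaged_predictions)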
|
bsd-3-clause
|
darshanthaker/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4agg.py
|
70
|
4985
|
"""
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt4 import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
self.repaint( )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
#FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw(self)
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self.drawRect:
p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
p.end()
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter( self )
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.update()
# Added following line to improve realtime pan/zoom on windows:
QtGui.qApp.processEvents()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
l, b, w, h = bbox.bounds
t = b + h
self.update(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
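# Illustrative usage sketch, not part of the original backend module: a script
# would normally select this backend before importing pyplot, e.g.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Qt4Agg')
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])  # simple line to exercise the Agg draw path
    plt.show()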
|
agpl-3.0
|
ch3ll0v3k/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, turning
# the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
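# Illustrative sketch, not part of the original example: GridSearchCV expands
# the parameter grid above into individual candidate settings; ParameterGrid
# makes that expansion explicit.  The helper name is hypothetical.
def expanded_candidates(param_grid):
    from sklearn.grid_search import ParameterGrid
    return list(ParameterGrid(param_grid))
# e.g. expanded_candidates(tuned_parameters) yields 12 candidates:
# 8 rbf settings (2 gamma values x 4 C values) plus 4 linear settings.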
|
bsd-3-clause
|
datapythonista/pandas
|
pandas/tests/frame/methods/test_infer_objects.py
|
6
|
1241
|
from datetime import datetime
from pandas import DataFrame
import pandas._testing as tm
class TestInferObjects:
def test_infer_objects(self):
# GH#11221
df = DataFrame(
{
"a": ["a", 1, 2, 3],
"b": ["b", 2.0, 3.0, 4.1],
"c": [
"c",
datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3),
],
"d": [1, 2, 3, "d"],
},
columns=["a", "b", "c", "d"],
)
df = df.iloc[1:].infer_objects()
assert df["a"].dtype == "int64"
assert df["b"].dtype == "float64"
assert df["c"].dtype == "M8[ns]"
assert df["d"].dtype == "object"
expected = DataFrame(
{
"a": [1, 2, 3],
"b": [2.0, 3.0, 4.1],
"c": [datetime(2016, 1, 1), datetime(2016, 1, 2), datetime(2016, 1, 3)],
"d": [2, 3, "d"],
},
columns=["a", "b", "c", "d"],
)
# reconstruct frame to verify inference is same
result = df.reset_index(drop=True)
tm.assert_frame_equal(result, expected)
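# Illustrative sketch, not one of the original tests: infer_objects() only
# soft-converts object columns whose remaining values share a single scalar
# type, which is why the mixed column "d" above stays object after slicing.
def _infer_objects_sketch():
    df = DataFrame({"x": ["x", 1, 2, 3]})
    sliced = df.iloc[1:]  # drops the string, leaving only integers
    return sliced.infer_objects()["x"].dtype  # int64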
|
bsd-3-clause
|
Trigition/MTG-DataScraper
|
scripts/artists.py
|
1
|
1815
|
#!/usr/bin/env python
import pandas as pd
import codecs
def split_artists(row):
artist_string = row['artist'].decode('utf8')
# Attempt to split by & and 'and'
# Replace & and 'and' with similar delimiter
# delimiters are assumed to be surrounded by spaces
sanitized_artists = artist_string.replace(' & ', '$').replace(' and ', '$')
artists = [artist.strip() for artist in sanitized_artists.split('$')]
return artists
def load_artist_correction_dict(filename):
corrections = {}
for line in codecs.open(filename, 'r', encoding='utf8').readlines():
# If line begins with '#' ignore
if line[0].strip() == '#' or len(line.strip()) == 0:
continue
bad_name, corrected_name = line.split(' -> ')
# Clean up names
bad_name = bad_name.strip()
corrected_name = corrected_name.strip()
corrections[bad_name] = corrected_name
return corrections
def artist_card(raw_card, new_card_id, artist_table, args, correction_dict={}):
card_artists_table = []
card_id = new_card_id
for referenced_artist in split_artists(raw_card):
# Check to see if referenced artist needs to be corrected
if referenced_artist in correction_dict:
# Artist name needs to be corrected
cur_artist = correction_dict[referenced_artist]
print "Correcting: %s to %s" % (referenced_artist, cur_artist)
else:
            # No correction necessary
cur_artist = referenced_artist
# Grab id of artist and create reference
artist_id = artist_table.get_id(cur_artist, 'artist')
card_artists_table.append( {args.card_id_column : new_card_id,
args.artist_id_column : artist_id} )
return card_artists_table
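# Illustrative sketch, not part of the original script: split_artists() turns a
# collaborative credit into individual names.  The row contents are hypothetical.
def _split_artists_demo():
    row = {'artist': 'Alice Example & Bob Example'}
    return split_artists(row)  # [u'Alice Example', u'Bob Example']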
|
mit
|
garbersc/keras-galaxies
|
tests/keras_test.py
|
1
|
4828
|
from theano import config
import keras.callbacks
import keras.backend as T
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, MaxPooling1D, Dropout, Input, Convolution1D
from keras.layers.core import Lambda, Reshape
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
print config.optimizer
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
print 'batch ended'
self.losses.append(logs.get('loss'))
class ValidLossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_epoch_end(self, batch, logs={}):
print 'epoch ended'
self.losses.append(model_noNorm.evaluate(X_test, Y_test, batch_size=32*5)[0])
n_data=int(1e5)
Y_train = np.random.binomial(n=1, p=0.3, size=(n_data,3))
Y_train = np.asarray(Y_train, dtype='float32')
X_train = np.random.randn(n_data,20)
X_train=X_train**2
Y_test = np.random.binomial(n=1, p=0.3, size=(32*5,3))
X_test = np.random.randn(32*5,20)
X_test=X_test**2
#model0 = Sequential()
#main_input = Input(shape=(None,10),batch_input_shape=(None,10), dtype='float32', name='main_input')
main_input = Input(batch_shape=(None,20) , dtype='float32', name='main_input')
#x=MaxPooling1D(2,input_shape=(20,2))(main_input)
#print x.shape
x=Dense(output_dim=40, activation='relu',input_shape=(20,))(main_input)
x=Dropout(0.5)(x)
#x=Dense(output_dim=40, input_dim=10 activation='relu')#(main_input)
#model0.add(Activation("relu"))
x=Dense(output_dim=1024, activation='relu')(x)
x=Dropout(0.5)(x)
x=Dense(output_dim=1024, activation='relu')(x)
x=Dropout(0.5)(x)
'''
#model.add(MaxPooling1D())
model.add(Dense(output_dim=4000))
model.add(Activation("relu"))
model.add(Dropout(0.5))
#model.add(MaxPooling1D())
model.add(Dense(output_dim=4000))
model.add(Activation("relu"))
model.add(Dropout(0.5))
#model.add(MaxPooling1D())
model.add(Dense(output_dim=4000))
model.add(Activation("relu"))
model.add(Dropout(0.5))
#model.add(MaxPooling1D())
'''
x=Dense(output_dim=3,name='model0_out')(x)#,input_shape=(20,))(main_input)
#model0.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
'''
def lambdaFunction(x,normalise):
if normalise:
print 'norm'
x_ret=T.clip(x,0.0,0.001)
#x_ret=T.ones_like(x)
else:
print 'no_norm'
x_ret=x
return T.reshape(x_ret,(x_ret.shape[0],3))
def output_shape(input_shape):
return (input_shape[0],3)
'''
#l_noNorm=Lambda(lambdaFunction,output_shape,arguments={'normalise': False})(x)
#l_norm=Lambda(lambdaFunction,output_shape,arguments={'normalise': True})(x)
#model=Model(input=main_input,output=[l_noNorm,l_norm])
#model=Model(input=main_input,output=l_noNorm)
#model_norm=Model(input=main_input,output=l_norm)
model_noNorm=Model(input=main_input,output=x)
#print model_norm.input
#print model_norm.input_shape
#print model_norm.output_shape
#model_norm=Model(input=model0.get_layer('model0_out').output,output=l_norm)
#NORMALISE=T.variable(False)
#model_norm.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
model_noNorm.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])
#model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
#model_norm.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
#history = LossHistory()
#vHistory = ValidLossHistory()
model_noNorm.fit(X_train, Y_train, nb_epoch=5, batch_size=n_data)#, callbacks=[history,vHistory])
#history.on_train_begin()
#vHistory.on_train_begin()
#model=model_noNorm
#for i in xrange(0,3):
# print "epoch %i/5" % (i+1)
# for j in xrange(0,(X_train.shape[0])//10):
# if j>0 or i>0:
# model=model_norm
# #NORMALISE.set_value(True)
# #print NORMALISE
# #print NORMALISE.get_value()
# #print NORMALISE.eval()
# print "%s/%s" %(j+1,(X_train.shape[0])//10)
# #print T.shape( X_train[(j*(X_train.shape[0])//10) : ((j+1)*(X_train.shape[0])//10) ] )
# #print T.shape( Y_train[ (j*(X_train.shape[0])//10) : ((j+1)*(X_train.shape[0])//10) ] )
# print model.train_on_batch( x=X_train[(j*(X_train.shape[0])//10) : ((j+1)*(X_train.shape[0])//10) ], y=Y_train[ (j*(X_train.shape[0])//10) : ((j+1)*(X_train.shape[0])//10) ] )
# print model.predict_on_batch(x=X_train[(j*(X_train.shape[0])//10) : ((j+1)*(X_train.shape[0])//10) ])
#history.on_batch_end(j,)
#vHistory.on_epoch_end()
#print model_norm.get_weights()
#print model_noNorm.get_weights()
#print model_norm.get_weights()[0]==model_noNorm.get_weights()[0]
#loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32*5)
#print "\n"
#print loss_and_metrics
#lossplt = plt.plot(xrange(0,len(history.losses)),history.losses,'ro')
#lossplt = plt.plot(xrange(0,len(vHistory.losses)),vHistory.losses,'go')
#plt.show()
|
bsd-3-clause
|
victorbergelin/scikit-learn
|
sklearn/feature_extraction/tests/test_image.py
|
205
|
10378
|
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
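# Illustrative sketch, not one of the original tests: extracting every 2x2
# patch from a small array and reconstructing it from the overlapping patches.
def _patch_roundtrip_sketch():
    img = np.arange(25, dtype=np.float64).reshape(5, 5)
    patches = extract_patches_2d(img, (2, 2))  # shape (16, 2, 2)
    return reconstruct_from_patches_2d(patches, img.shape)  # equals img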
|
bsd-3-clause
|
MO-cowbell/open-data-science
|
inventory_forecast/inventory_forecast.py
|
3
|
4914
|
#!/usr/bin/python
__author__ = 'Thomas Vo, DonorsChoose.org'
# sample forecasting script for blog post using both Python and R
import dataset
import numpy as np
import pandas as pd
import rpy2.robjects as ro
from math import factorial
# smoothing function (source = http://wiki.scipy.org/Cookbook/SavitzkyGolay)
def smoothing(y, window_size, order, deriv = 0, rate = 1):
order_range = range(order + 1)
half_window = (window_size -1) // 2
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
firstvals = y[0] - np.abs( y[1:half_window + 1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode = 'valid')
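# Illustrative sketch, not part of the original script: applying the
# Savitzky-Golay smoother above to a noisy sine wave.
def _smoothing_demo():
    t = np.linspace(0, 2 * np.pi, 200)
    noisy = np.sin(t) + np.random.normal(scale=0.2, size=t.shape)
    return smoothing(noisy, window_size=31, order=3)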
# connects to DB, runs SQL query, returns dataframe
def download_data(api_url, query_filename, colname_date, colname_value):
# load the query
with open(query_filename, 'r') as query_file:
sql_query = query_file.read()
# connect to the database
db_connect = dataset.connect(url = api_url, reflectMetadata = False)
# run the query
query_results = db_connect.query(sql_query)
# load query results into dataframe
new_df = pd.DataFrame()
for row in query_results:
new_df = new_df.append({
colname_date: row[colname_date],
colname_value: row[colname_value]
}, ignore_index = True)
return new_df
# didn't want to deal with February 29ths
def drop_leap(df, colname_date):
leap_indices = []
for i in range(df.shape[0]):
if (df.ix[i, colname_date].month == 2) & (df.ix[i, colname_date].day == 29):
leap_indices.append(i)
df = df.drop(df.index[leap_indices])
df.reset_index(drop = True, inplace = True)
return df
# provide a df with date and value column to forecast
def forecast(df, r_filename, r_function, colname_date, colname_value, years_to_forecast = 1):
# only predict in increments of years, simplifies things
days_to_forecast = years_to_forecast * 365
# load the R script
with open(r_filename, 'r') as r_file:
r_script = r_file.read()
# sending an R function into Python
ro.r(r_script)
r_function = ro.globalenv[r_function]
# running the R function inside of Python, can only interpret lists
vec = r_function(list(df[colname_value]), log_vec = True, forecast_units = days_to_forecast)
# smooth the vector
vec = smoothing(y = np.array(vec), window_size = 51, order = 3)
# only keep the predicted values
vec = vec[::-1][:days_to_forecast][::-1]
# add new dates and values
for i in range(years_to_forecast):
# make new_df with 365 days into the future
new_df = df[(df.shape[0] - 365):].copy()
new_df.reset_index(drop = True, inplace = True)
new_df.loc[:, colname_date] = pd.DatetimeIndex(new_df.loc[:, colname_date]) + pd.DateOffset(years = 1)
new_df.loc[:, colname_value] = vec[((i) * 365):((i + 1) * 365)]
# merge new_df back to df
df = pd.concat([df, new_df])
df.reset_index(drop = True, inplace = True)
return df
def upload_data(api_url, df, tablename, colname_date, colname_value):
# connect to the database
db_connect = dataset.connect(url = api_url, reflectMetadata = False)
# assuming the user has write access, remove the entries uploaded from the previous run
db_connect.query('DELETE FROM ' + tablename + ';')
# insert rows
table = db_connect.load_table(tablename)
rows = [{colname_date: c1, colname_value: c2} for c1, c2 in zip(df[colname_date], df[colname_value])]
table.insert_many(rows)
if __name__ == '__main__':
# parameters that need to be specified
your_api_url = 'your_username:your_password.your_instance_name.redshift.amazonaws.com'
your_query_filename = 'inventory_query.sql'
your_tablename = 'inventory_forecast'
your_colname_date = 'date_of_interest'
your_colname_value = 'project_count'
your_r_filename = 'forecast.r'
your_r_function = 'forecast_vec'
temp_df = download_data(
api_url = your_api_url,
query_filename = your_query_filename,
colname_date = your_colname_date,
colname_value = your_colname_value)
temp_df = drop_leap(
df = temp_df,
colname_date = your_colname_date)
temp_df = forecast(
df = temp_df,
r_filename = your_r_filename,
r_function = your_r_function,
colname_date = your_colname_date,
colname_value = your_colname_value,
years_to_forecast = 1)
upload_data(
api_url = your_api_url,
df = temp_df,
tablename = your_tablename,
colname_date = your_colname_date,
colname_value = your_colname_value)
|
gpl-2.0
|
huobaowangxi/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
280
|
2541
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
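# Illustrative sketch, not one of the original tests: l1_min_c returns the value
# of C below which an l1-penalized linear model is guaranteed to have all-zero
# coefficients, so fitting slightly above it should give a non-trivial model.
def _l1_min_c_sketch():
    c = l1_min_c(dense_X, Y1, 'squared_hinge')
    clf = LinearSVC(loss='squared_hinge', penalty='l1', dual=False, C=c * 1.01)
    clf.fit(dense_X, Y1)
    return np.abs(clf.coef_).sum() > 0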
|
bsd-3-clause
|
hyqneuron/dsc_gan
|
dsc_gan5.py
|
1
|
34322
|
import tensorflow as tf
import numpy as np
from tensorflow.contrib import layers
import scipy.io as sio
from scipy.sparse.linalg import svds
# from skcuda.linalg import svd as svd_cuda
# import pycuda.gpuarray as gpuarray
# from pycuda.tools import DeviceMemoryPool
from sklearn import cluster
from sklearn.preprocessing import normalize
from munkres import Munkres
import os
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('name') # name of experiment, used for creating log directory
parser.add_argument('--lambda1', type=float, default=1.0)
parser.add_argument('--lambda2', type=float, default=0.2) # sparsity cost on C
parser.add_argument('--lambda3', type=float, default=1.0) # lambda on gan loss
parser.add_argument('--lambda4', type=float, default=0.1) # lambda on AE L2 regularization
parser.add_argument('--lr', type=float, default=1e-3) # learning rate
parser.add_argument('--lr2', type=float, default=2e-4) # learning rate for discriminator and eqn3plus
parser.add_argument('--pretrain', type=int, default=0) # number of iterations of pretraining
parser.add_argument('--epochs', type=int, default=1000) # number of epochs to train on eqn3 and eqn3plus
parser.add_argument('--enable-at', type=int, default=300) # epoch at which to enable eqn3plus
parser.add_argument('--dataset', type=str, default='yaleb', choices=['yaleb', 'orl', 'coil20', 'coil100'])
parser.add_argument('--interval', type=int, default=50)
parser.add_argument('--interval2', type=int, default=1)
parser.add_argument('--bound', type=float, default=0.02) # discriminator weight clipping limit
parser.add_argument('--D-init', type=int, default=100) # number of discriminators steps before eqn3plus starts
parser.add_argument('--D-steps', type=int, default=1)
parser.add_argument('--G-steps', type=int, default=1)
parser.add_argument('--save', action='store_true') # save pretrained model
parser.add_argument('--r', type=int, default=0) # Nxr rxN, use 0 to default to NxN Coef
## new parameters
parser.add_argument('--rank', type=int, default=10) # dimension of the subspaces
parser.add_argument('--beta1', type=float, default=0.00) # promote subspaces' difference
parser.add_argument('--beta2', type=float, default=0.010) # promote org of subspaces' basis difference
parser.add_argument('--beta3', type=float, default=0.010) # promote org of subspaces' basis difference
parser.add_argument('--stop-real', action='store_true') # cut z_real path
parser.add_argument('--stationary', type=int, default=1) # update z_real every so generator epochs
parser.add_argument('--submean', action='store_true')
parser.add_argument('--proj-cluster', action='store_true')
parser.add_argument('--usebn', action='store_true')
parser.add_argument('--no-uni-norm', action='store_true')
parser.add_argument('--one2one', action='store_true')
parser.add_argument('--alpha', type=float, default=0.1)
parser.add_argument('--matfile', default=None)
parser.add_argument('--imgmult', type=float, default=1.0)
parser.add_argument('--palpha', type=float, default=None)
parser.add_argument('--kernel-size', type=int, nargs='+', default=None)
parser.add_argument('--m', type=float, default=None)
"""
Example launch commands:
CUDA_VISIBLE_DEVICES=0 python dsc_gan.py yaleb_run1 --pretrain 60000 --epochs 4000 --enable-at 3000 --dataset yaleb
pretrain for 60000 iterations first, then train on eqn3 for 3000 epochs, and on eqn3plus for 1000 epochs
CUDA_VISIBLE_DEVICES=0 python dsc_gan.py orl_run1 --pretrain 10000 --epochs 4000 --enable-at 2000 --dataset orl
pretrain for 10000 iterations first, then train on eqn3 for 2000 epochs, and on eqn3plus for 2000 epochs
"""
class ConvAE(object):
def __init__(self,
args,
n_input, n_hidden, kernel_size, n_class, n_sample_perclass, disc_size,
lambda1, lambda2, lambda3, batch_size, r=0, rank=10,
reg=None, disc_bound=0.02,
model_path=None, restore_path=None,
logs_path='logs'):
self.args = args
self.n_class = n_class
self.n_input = n_input
self.n_hidden = n_hidden
self.kernel_size = kernel_size
self.n_sample_perclass = n_sample_perclass
self.disc_size = disc_size
self.batch_size = batch_size
self.reg = reg
self.model_path = model_path
self.restore_path = restore_path
self.rank = rank
self.iter = 0
"""
Eqn3
"""
# input required to be fed
self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
self.learning_rate = tf.placeholder(tf.float32, [])
# run input through encoder, latent is the output, shape is the shape of encoder
latent, shape = self.encoder(self.x)
self.latent_shape = latent.shape
self.latent_size = reduce(lambda x, y: int(x) * int(y), self.latent_shape[1:], 1)
# self-expressive layer
z = tf.reshape(latent, [batch_size, -1])
z.set_shape([batch_size, self.latent_size])
if args.usebn:
z = tf.contrib.layers.batch_norm(z)
if r == 0:
Coef = tf.Variable(1.0e-4 * tf.ones([self.batch_size, self.batch_size], tf.float32), name='Coef')
else:
v = (1e-2) / r
L = tf.Variable(v * tf.ones([self.batch_size, r]), name='Coef_L')
R = tf.Variable(v * tf.ones([r, self.batch_size]), name='Coef_R')
Coef = tf.matmul(L, R, name='Coef_full')
z_c = tf.matmul(Coef, z, name='matmul_Cz')
self.Coef = Coef
Coef_weights = [v for v in tf.trainable_variables() if v.name.startswith('Coef')]
        latent_c = tf.reshape(z_c, tf.shape(latent))  # potential problem here
self.z = z
# run self-expressive's output through decoder
self.x_r = self.decoder(latent_c, shape)
ae_weights = [v for v in tf.trainable_variables() if (v.name.startswith('enc') or v.name.startswith('dec'))]
self.ae_weight_norm = tf.sqrt(sum([tf.norm(v, 2) ** 2 for v in ae_weights]))
eqn3_weights = Coef_weights + ae_weights
# AE regularization loss
self.loss_aereg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # weight decay
# Eqn 3 loss
self.loss_recon = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.x_r, self.x), 2.0))
self.loss_sparsity = tf.reduce_sum(tf.pow(self.Coef, 2.0))
self.loss_selfexpress = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(z_c, z), 2.0))
self.loss_eqn3 = self.loss_recon + lambda1 * self.loss_sparsity + lambda2 * self.loss_selfexpress + self.loss_aereg
with tf.variable_scope('optimizer_eqn3'):
self.optimizer_eqn3 = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss_eqn3,
var_list=eqn3_weights)
"""
Pretraining
"""
# pretraining loss
self.x_r_pre = self.decoder(latent, shape, reuse=True)
self.loss_recon_pre = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.x_r_pre, self.x), 2.0))
self.loss_pretrain = self.loss_recon_pre + self.loss_aereg
with tf.variable_scope('optimizer_pre'):
self.optimizer_pre = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss_pretrain,
var_list=ae_weights)
"""
Discriminator
"""
# step counting
self.gen_step = tf.Variable(0, dtype=tf.float32, trainable=False) # keep track of number of generator steps
self.gen_step_op = self.gen_step.assign(self.gen_step + 1) # increment generator steps
self.y_x = tf.placeholder(tf.int32, [batch_size])
### write by myself
print 'building discriminator'
self.Us = self.make_Us()
u_primes = self.svd_initialization(self.z, self.y_x)
self.u_ini = [tf.assign(u, u_prime) for u, u_prime in zip(self.Us, u_primes)]
z_real = self.z
self.score_disc, self.Us_update_op = self.compute_disc_loss(z_real, self.y_x)
print 'adding disc regularization'
regulariz1 = self.regularization1(reuse=True)
regulariz2 = self.regularization2(reuse=True)
self.loss_disc = args.beta2 * regulariz1 + args.beta3 * regulariz2 - self.score_disc
print 'building disc optimizers'
with tf.variable_scope('optimizer_disc'):
self.optimizer_disc = tf.train.AdamOptimizer(self.learning_rate, beta1=0.0).minimize(self.loss_disc, var_list=self.Us)
print 'building eqn3plus optimizers'
# Eqn 3 + generator loss
self.loss_eqn3plus = self.loss_eqn3 + lambda3 * self.score_disc
with tf.variable_scope('optimizer_eqn3plus'):
self.optimizer_eqn3plus = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(
self.loss_eqn3plus, var_list=eqn3_weights)
# finalize stuffs
s0 = tf.summary.scalar("loss_recon_pre", self.loss_recon_pre / batch_size) # 13372
s1 = tf.summary.scalar("loss_recon", self.loss_recon)
s2 = tf.summary.scalar("loss_sparsity", self.loss_sparsity)
s3 = tf.summary.scalar("loss_selfexpress", self.loss_selfexpress)
s4 = tf.summary.scalar("score_disc", self.score_disc)
s5 = tf.summary.scalar("ae_l2_norm", self.ae_weight_norm) # 29.8
s6 = tf.summary.scalar("disc_real", self.disc_score_real)
s7 = tf.summary.scalar("disc_fake", self.disc_score_fake)
self.summaryop_eqn3 = tf.summary.merge([s1, s2, s3, s5])
self.summaryop_eqn3plus = tf.summary.merge([s1, s2, s3, s4, s5, s6, s7])
self.summaryop_pretrain = tf.summary.merge([s0, s5])
self.init = tf.global_variables_initializer()
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True # stop TF from eating up all GPU RAM
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
self.sess = tf.InteractiveSession(config=config)
self.sess.run(self.init)
self.saver = tf.train.Saver([v for v in ae_weights if v.name.startswith('enc_w') or v.name.startswith('dec_w')])
self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph(), flush_secs=20)
# Building the encoder
def encoder(self, x):
shapes = []
n_hidden = [1] + self.n_hidden
input = x
for i, k_size in enumerate(self.kernel_size):
w = tf.get_variable('enc_w{}'.format(i), shape=[k_size, k_size, n_hidden[i], n_hidden[i + 1]],
initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
b = tf.get_variable('enc_b{}'.format(i), shape=[n_hidden[i + 1]], initializer=tf.zeros_initializer())
shapes.append(input.get_shape().as_list())
enc_i = tf.nn.conv2d(input, w, strides=[1, 2, 2, 1], padding='SAME')
enc_i = tf.nn.bias_add(enc_i, b)
enc_i = tf.nn.relu(enc_i)
input = enc_i
return input, shapes
# Building the decoder
def decoder(self, z, shapes, reuse=False):
# Encoder Hidden layer with sigmoid activation #1
input = z
n_hidden = list(reversed([1] + self.n_hidden))
shapes = list(reversed(shapes))
        for i, k_size in enumerate(reversed(self.kernel_size)):
with tf.variable_scope('', reuse=reuse):
w = tf.get_variable('dec_w{}'.format(i), shape=[k_size, k_size, n_hidden[i + 1], n_hidden[i]],
initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
b = tf.get_variable('dec_b{}'.format(i), shape=[n_hidden[i + 1]], initializer=tf.zeros_initializer())
dec_i = tf.nn.conv2d_transpose(input, w, tf.stack(
[tf.shape(self.x)[0], shapes[i][1], shapes[i][2], shapes[i][3]]),
strides=[1, 2, 2, 1], padding='SAME')
dec_i = tf.add(dec_i, b)
if i != len(self.n_hidden) - 1:
dec_i = tf.nn.relu(dec_i)
input = dec_i
return input
def get_u_init_for_g(self, g):
N_g = tf.shape(g)[0] # number of datapoints in this cluster
gt = tf.transpose(g)
q, r = tf.qr(gt, full_matrices=False)
idx = [j for j in xrange(args.rank)]
qq = tf.gather(tf.transpose(q), idx)
qq = tf.transpose(qq)
return qq
def svd_initialization(self, z, y):
group_index = [tf.where(tf.equal(y, k)) for k in xrange(self.n_class)] # indices of datapoints in k-th cluster
groups = [tf.gather(z, group_index[k]) for k in xrange(self.n_class)] # datapoints in k-th cluster
# remove extra dimension
groups = [tf.squeeze(g, axis=1) for g in groups]
# subtract mean
if self.args.submean:
groups = [g - tf.reduce_mean(g, 0, keep_dims=True) for g in groups]
dim1 = tf.shape(z)[1]
u_prime = [self.get_u_init_for_g(g) for g in groups]
return u_prime
def uniform_recombine(self, g):
N_g = tf.shape(g)[0]
selector = tf.random_uniform([N_g, N_g]) # make random selector matrix
if not self.args.no_uni_norm:
selector = selector / tf.reduce_sum(selector, 1, keep_dims=True) # normalize each row to 1
g_fake = tf.matmul(selector, g, name='matmul_selectfake')
return g_fake
def make_Us(self):
Us = []
for j in xrange(self.n_class):
u = tf.get_variable('disc_w{}'.format(j), shape=[self.latent_size, self.rank],
initializer=layers.xavier_initializer())
Us.append(u)
return Us
def match_idx(self, g):
"""
for the group g, identify the Ui whose residual is minimal, then return
label, loss, sreal, u
where label=i, loss=residual_real - residual_fake, sreal=residual_real, u=Ui
"""
N_g = tf.shape(g)[0]
g_fake = self.uniform_recombine(g)
combined_sreal = []
Us = []
for i in xrange(self.n_class):
u = self.Us[i]
u = tf.nn.l2_normalize(u, dim=0)
uT = tf.transpose(u)
s_real = tf.reduce_sum((g - tf.matmul(tf.matmul(g, u), uT)) ** 2) / tf.to_float(N_g)
combined_sreal.append(s_real)
Us.append(u)
combined_sreal = tf.convert_to_tensor(combined_sreal)
Us = tf.convert_to_tensor(Us)
label = tf.cast(tf.arg_min(combined_sreal, dimension=0), tf.int32)
sreal = combined_sreal[label]
u = Us[label]
# returns label, and corresponding s_real and u
return label, sreal, u
def compute_disc_loss(self, z, y):
group_index = [tf.where(tf.equal(y, k)) for k in xrange(self.n_class)] # indices of datapoints in k-th cluster
groups = [tf.gather(z, group_index[k]) for k in xrange(self.n_class)] # datapoints in k-th cluster
# remove extra dimension
groups = [tf.squeeze(g, axis=1) for g in groups]
# subtract mean
if self.args.submean:
groups = [g - tf.reduce_mean(g, 0, keep_dims=True) for g in groups]
dim1 = tf.shape(z)[1]
# for each group, find its Ui
group_all = [self.match_idx(g) for g in groups]
group_label, group_sreal, group_u = zip(*group_all)
        # convert some of them to tensors so that tf.where and tf.gather work
group_label = tf.convert_to_tensor(group_label)
group_sreal = tf.convert_to_tensor(group_sreal)
group_new_loss = []
group_loss_real = []
group_loss_fake = []
Us_assign_ops = []
# identify the ones that are assigned to Ui but aren't the cluster with minimum residual, and do
# reinitialization on them
for i, g in enumerate(groups):
N_g = tf.shape(g)[0]
label = group_label[i]
sreal = group_sreal[i]
u = group_u[i]
u = tf.nn.l2_normalize(u, dim=0)
if self.args.one2one:
# indices of groups, whose label are the same as current one
idxs_with_label = tf.where(tf.equal(group_label, label))
# sreal of those corresponding groups
sreal_with_label = tf.squeeze(tf.gather(group_sreal, idxs_with_label), 1)
# among all those groups with the same label, whether current group has minimal sreal
ismin = tf.equal(sreal, tf.reduce_min(sreal_with_label))
# if it's the minimum, just use, otherwise reinit u
uu = tf.assign(self.Us[i], tf.cond(ismin, lambda: u, lambda: self.get_u_init_for_g(g)))
u = tf.nn.l2_normalize(uu, dim=0)
Us_assign_ops.append(uu)
# recompute loss
g = g / tf.norm(g, axis=1, keep_dims=True)
g_fake = self.uniform_recombine(g)
loss_real = tf.reduce_sum((g - tf.matmul(tf.matmul(g, u), tf.transpose(u))) ** 2) / tf.to_float(N_g)
loss_fake = tf.reduce_sum((g_fake - tf.matmul(tf.matmul(g_fake, u), tf.transpose(u))) ** 2, axis=1)
if self.args.m:
loss_fake = self.args.m - loss_fake
loss_fake = -tf.nn.relu(loss_fake)
loss_fake = tf.reduce_sum(loss_fake) / tf.to_float(N_g)
if self.args.stop_real:
loss_real = tf.stop_gradient(loss_real)
loss = loss_real - loss_fake
# add to list
group_new_loss.append(loss)
group_loss_real.append(loss_real)
group_loss_fake.append(loss_fake)
self.disc_score_real = tf.reduce_mean(group_loss_real)
self.disc_score_fake = tf.reduce_mean(group_loss_fake)
return -tf.reduce_mean(group_new_loss), tf.group(*Us_assign_ops)
def regularization1(self, reuse=False):
combined = []
for i in xrange(self.n_class):
ui = self.Us[i]
uiT = tf.transpose(ui)
temp_sum = []
for j in xrange(self.n_class):
if j == i:
continue
uj = self.Us[j]
s = tf.reduce_sum((tf.matmul(uiT, uj)) ** 2)
temp_sum.append(s)
combined.append(tf.add_n(temp_sum))
return tf.add_n(combined) / self.n_class
def regularization2(self, reuse=False):
combined = []
for i in xrange(self.n_class):
ui = self.Us[i]
uiT = tf.transpose(ui)
s = tf.reduce_sum((tf.matmul(uiT, ui) - tf.eye(self.rank)) ** 2)
combined.append(s)
return tf.add_n(combined) / self.n_class
def partial_fit_eqn3(self, X, lr):
# take a step on Eqn 3/4
cost, Coef, summary, _ = self.sess.run((self.loss_recon, self.Coef, self.summaryop_eqn3, self.optimizer_eqn3),
feed_dict={self.x: X, self.learning_rate: lr})
self.summary_writer.add_summary(summary, self.iter)
self.iter += 1
return cost, Coef
def assign_u_parameter(self, X, y):
self.sess.run(self.u_ini, feed_dict={self.x: X, self.y_x: y})
def partial_fit_disc(self, X, y_x, lr):
self.sess.run([self.optimizer_disc, self.Us_update_op], feed_dict={self.x: X, self.y_x: y_x, self.learning_rate: lr})
def partial_fit_eqn3plus(self, X, y_x, lr):
# assert y_x.min() == 0, 'y_x is 0-based'
cost, Coef, summary, _, _ = self.sess.run(
[self.loss_recon, self.Coef, self.summaryop_eqn3plus, self.optimizer_eqn3plus, self.gen_step_op],
feed_dict={self.x: X, self.y_x: y_x, self.learning_rate: lr})
self.summary_writer.add_summary(summary, self.iter)
self.iter += 1
return cost, Coef
def partial_fit_pretrain(self, X, lr):
cost, summary, _ = self.sess.run([self.loss_recon_pre, self.summaryop_pretrain, self.optimizer_pre],
feed_dict={self.x: X, self.learning_rate: lr})
self.summary_writer.add_summary(summary, self.iter)
self.iter += 1
return cost
def get_ae_weight_norm(self):
norm, = self.sess.run([self.ae_weight_norm])
return norm
def get_loss_recon_pre(self, X):
loss_recon_pre, = self.sess.run([self.loss_recon_pre], feed_dict={self.x: X})
return loss_recon_pre
def get_projection_y_x(self, X):
disc_weights = self.sess.run(self.disc_weights)
z_real = self.sess.run(self.z_real_submean, feed_dict={self.x: X})
residuals = []
for Ui in disc_weights:
proj = np.matmul(z_real, Ui)
recon = np.matmul(proj, Ui.transpose())
residual = ((z_real - recon) ** 2).sum(axis=1)
residuals.append(residual)
residuals = np.stack(residuals, axis=1) # Nxn_class
y_x = residuals.argmin(1)
return y_x
def log_accuracy(self, accuracy):
summary = tf.Summary(value=[tf.Summary.Value(tag='accuracy', simple_value=accuracy)])
self.summary_writer.add_summary(summary, self.iter)
def initlization(self):
self.sess.run(self.init)
def reconstruct(self, X):
return self.sess.run(self.x_r, feed_dict={self.x: X})
def transform(self, X):
return self.sess.run(self.z, feed_dict={self.x: X})
def save_model(self):
save_path = self.saver.save(self.sess, self.model_path)
print("model saved in file: %s" % save_path)
def restore(self):
self.saver.restore(self.sess, self.restore_path)
print("model restored")
def check_size(self, X):
z = self.sess.run(self.z, feed_dict={self.x: X})
return z
def best_map(L1, L2):
# L1 should be the groundtruth labels and L2 should be the clustering labels we got
Label1 = np.unique(L1)
nClass1 = len(Label1)
Label2 = np.unique(L2)
nClass2 = len(Label2)
nClass = np.maximum(nClass1, nClass2)
G = np.zeros((nClass, nClass))
for i in range(nClass1):
ind_cla1 = L1 == Label1[i]
ind_cla1 = ind_cla1.astype(float)
for j in range(nClass2):
ind_cla2 = L2 == Label2[j]
ind_cla2 = ind_cla2.astype(float)
G[i, j] = np.sum(ind_cla2 * ind_cla1)
m = Munkres()
index = m.compute(-G.T)
index = np.array(index)
c = index[:, 1]
newL2 = np.zeros(L2.shape)
for i in range(nClass2):
newL2[L2 == Label2[i]] = Label1[c[i]]
return newL2
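# Illustrative sketch, not part of the original code: best_map permutes the
# predicted cluster labels so that they line up with the ground-truth labels.
def _best_map_demo():
    gt = np.array([0, 0, 1, 1, 2, 2])
    pred = np.array([2, 2, 0, 0, 1, 1])
    return best_map(gt, pred)  # values match gt exactly (as a float array)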
def thrC(C, ro):
if ro < 1:
N = C.shape[1]
Cp = np.zeros((N, N))
S = np.abs(np.sort(-np.abs(C), axis=0))
Ind = np.argsort(-np.abs(C), axis=0)
for i in range(N):
cL1 = np.sum(S[:, i]).astype(float)
stop = False
csum = 0
t = 0
while (stop == False):
csum = csum + S[t, i]
if csum > ro * cL1:
stop = True
Cp[Ind[0:t + 1, i], i] = C[Ind[0:t + 1, i], i]
t = t + 1
else:
Cp = C
return Cp
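# Illustrative sketch, not part of the original code: thrC keeps, per column,
# only the largest-magnitude coefficients whose cumulative absolute sum reaches
# the fraction ro of that column's l1 norm, zeroing the rest.
def _thrC_demo():
    C = np.array([[0.50, 0.10],
                  [0.05, 0.80],
                  [0.01, 0.05]])
    return thrC(C, ro=0.9)  # keeps the two largest entries in each column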
def build_aff(C):
N = C.shape[0]
Cabs = np.abs(C)
ind = np.argsort(-Cabs, 0)
for i in range(N):
Cabs[:, i] = Cabs[:, i] / (Cabs[ind[0, i], i] + 1e-6)
    Cksym = Cabs + Cabs.T
return Cksym
def spectral_cluster(L, n, eps=2.2e-8):
"""
L: Laplacian
n: number of clusters
Translates MATLAB code below:
N = size(L, 1)
DN = diag( 1./sqrt(sum(L)+eps) );
LapN = speye(N) - DN * L * DN;
[~,~,vN] = svd(LapN);
kerN = vN(:,N-n+1:N);
normN = sum(kerN .^2, 2) .^.5;
kerNS = bsxfun(@rdivide, kerN, normN + eps);
groups = kmeans(kerNS,n,'maxiter',MAXiter,'replicates',REPlic,'EmptyAction','singleton');
"""
N = L.shape[0]
DN = (1. / np.sqrt(L.sum(0) + eps))
LapN = np.eye(N) - DN * L * DN
def post_proC(C, K, d, alpha):
# C: coefficient matrix, K: number of clusters, d: dimension of each subspace
C = 0.5 * (C + C.T)
r = d * K + 1 # K=38, d=10
U, S, _ = svds(C, r, v0=np.ones(C.shape[0]))
# U, S, _ = svd_cuda(C, allocator=mem_pool)
# take U and S from GPU
# U = U[:, :r].get()
# S = S[:r].get()
U = U[:, ::-1]
S = np.sqrt(S[::-1])
S = np.diag(S)
U = U.dot(S)
U = normalize(U, norm='l2', axis=1)
Z = U.dot(U.T)
#Z = Z * (Z > 0)
L = np.abs(np.abs(Z) ** alpha)
L = L / L.max()
L = 0.5 * (L + L.T)
spectral = cluster.SpectralClustering(n_clusters=K, eigen_solver='arpack', affinity='precomputed',
assign_labels='discretize')
spectral.fit(L)
grp = spectral.fit_predict(L) # +1
return grp, L
def err_rate(gt_s, s):
c_x = best_map(gt_s, s)
err_x = np.sum(gt_s[:] != c_x[:])
missrate = err_x.astype(float) / (gt_s.shape[0])
return missrate
def build_laplacian(C):
C = 0.5 * (np.abs(C) + np.abs(C.T))
W = np.sum(C, axis=0)
W = np.diag(1.0 / W)
L = W.dot(C)
return L
def reinit_and_optimize(args, Img, Label, CAE, n_class, k=10, post_alpha=3.5):
alpha = args.alpha #max(0.4 - (n_class - 1) / 10 * 0.1, 0.1)
    print(alpha)
acc_ = []
if args.epochs is None:
num_epochs = 50 + n_class * 25 # 100+n_class*20
else:
num_epochs = args.epochs
# init
CAE.initlization()
###
### Stage 1: pretrain
###
# if we skip pretraining, we restore already-trained model
if args.pretrain == 0:
CAE.restore()
# otherwise we pretrain the model first
else:
        print('Pretrain for {} steps'.format(args.pretrain))
"""
After pretrain:
AE l2 norm : 29
Ae recon loss: 13372
"""
for epoch in xrange(1, args.pretrain + 1):
minibatch_size = 128
indices = np.random.permutation(Img.shape[0])[:minibatch_size]
minibatch = Img[indices] # pretrain with random mini-batch
cost = CAE.partial_fit_pretrain(minibatch, args.lr)
if epoch % 100 == 0:
norm = CAE.get_ae_weight_norm()
print 'pretraining epoch {}, cost: {}, norm: {}'.format(epoch, cost / float(minibatch_size), norm)
if args.save:
CAE.save_model()
###
### Stage 2: fine-tune network
###
    print('Finetune for {} steps'.format(num_epochs))
acc_x = 0.0
y_x_mode = 'svd'
for epoch in xrange(1, num_epochs + 1):
# eqn3
if epoch < args.enable_at:
cost, Coef = CAE.partial_fit_eqn3(Img, args.lr)
interval = args.interval # normal interval
# overtrain discriminator
elif epoch == args.enable_at:
print('Initialize discriminator for {} steps'.format(args.D_init))
CAE.assign_u_parameter(Img, y_x)
for i in xrange(args.D_init):
CAE.partial_fit_disc(Img, y_x, args.lr2)
if args.proj_cluster:
y_x_mode = 'projection'
# eqn3plus
else:
for i in xrange(args.D_steps):
CAE.partial_fit_disc(Img, y_x, args.lr2) # discriminator step discriminator
for i in xrange(args.G_steps):
cost, Coef = CAE.partial_fit_eqn3plus(Img, y_x, args.lr2)
interval = args.interval2 # GAN interval
# every interval epochs, perform clustering and evaluate accuracy
if epoch % interval == 0:
print("epoch: %.1d" % epoch, "cost: %.8f" % (cost / float(batch_size)))
Coef = thrC(Coef, alpha)
t_begin = time.time()
if y_x_mode == 'svd':
y_x_new, _ = post_proC(Coef, n_class, k, post_alpha)
else:
y_x_new = CAE.get_projection_y_x(Img)
if len(set(list(np.squeeze(y_x_new)))) == n_class:
y_x = y_x_new
else:
print('================================================')
print('Warning: clustering produced empty clusters')
print('================================================')
missrate_x = err_rate(Label, y_x)
t_end = time.time()
acc_x = 1 - missrate_x
print("accuracy: {}".format(acc_x))
print('post processing time: {}'.format(t_end - t_begin))
CAE.log_accuracy(acc_x)
clustered = True
mean = acc_x
median = acc_x
print("{} subjects, accuracy: {}".format(n_class, acc_x))
return (1 - mean), (1 - median)
def prepare_data_YaleB(folder):
# load face images and labels
mat = sio.loadmat(os.path.join(folder, args.matfile or 'YaleBCrop025.mat'))
img = mat['Y']
# Reorganize data a bit, put images into Img, and labels into Label
I = []
Label = []
for i in range(img.shape[2]): # i-th subject
for j in range(img.shape[1]): # j-th picture of i-th subject
temp = np.reshape(img[:, j, i], [42, 48])
Label.append(i)
I.append(temp)
I = np.array(I)
Label = np.array(Label[:])
Img = np.transpose(I, [0, 2, 1])
Img = np.expand_dims(Img[:], 3)
# constants
n_input = [48, 42]
n_hidden = [10, 20, 30]
kernel_size = [5, 3, 3]
n_sample_perclass = 64
disc_size = [200, 50, 1]
# tunable numbers
k = 10
post_alpha = 3.5
all_subjects = [38] # number of subjects to use in experiment
model_path = os.path.join(folder, 'model-102030-48x42-yaleb.ckpt')
return Img, Label, n_input, n_hidden, kernel_size, n_sample_perclass, disc_size, k, post_alpha, all_subjects, model_path
def prepare_data_orl(folder):
mat = sio.loadmat(os.path.join(folder, args.matfile or 'ORL2fea.mat'))
Label = mat['label'].reshape(400).astype(np.int32)
Img = mat['fea'].reshape(400, 32, 32, 1) * 100
# constants
n_input = [32, 32]
n_hidden = [5, 3, 3]
kernel_size = [5, 3, 3]
n_sample_perclass = 10
disc_size = [200, 50, 1]
# tunable numbers
k = 3 # svds parameter
post_alpha = 3.5 # Laplacian parameter
all_subjects = [40]
model_path = os.path.join(folder, 'model-533-32x32-orl-ckpt')
return Img, Label, n_input, n_hidden, kernel_size, n_sample_perclass, disc_size, k, post_alpha, all_subjects, model_path
def prepare_data_coil20(folder):
mat = sio.loadmat(os.path.join(folder, args.matfile or 'COIL20RRstd.mat'))
Label = mat['label'].reshape(-1).astype(np.int32) # 1440
Img = mat['fea'].reshape(-1, 32, 32, 1)
# Img = normalize_data(Img)
# constants
n_input = [32, 32]
n_hidden = [15]
kernel_size = args.kernel_size or [3]
n_sample_perclass = Img.shape[0] / 20
disc_size = [50, 1]
# tunable numbers
k = 10 # svds parameter
post_alpha = 3.5 # Laplacian parameter
all_subjects = [20]
model_path = os.path.join(folder, 'model-3-32x32-coil20-ckpt')
return Img, Label, n_input, n_hidden, kernel_size, n_sample_perclass, disc_size, k, post_alpha, all_subjects, model_path
def prepare_data_coil100(folder):
mat = sio.loadmat(os.path.join(folder, args.matfile or 'COLT100fea2fea.mat'))
Label = mat['label'].reshape(-1).astype(np.int32) # 1440
Img = mat['fea'].reshape(-1, 32, 32, 1)
# constants
n_input = [32, 32]
n_hidden = [50]
kernel_size = [5]
n_sample_perclass = Img.shape[0] / 100
disc_size = [50, 1]
# tunable numbers
k = 10 # svds parameter
post_alpha = 3.5 # Laplacian parameter
all_subjects = [100]
model_path = os.path.join(folder, 'model-5-32x32-coil100-ckpt')
return Img, Label, n_input, n_hidden, kernel_size, n_sample_perclass, disc_size, k, post_alpha, all_subjects, model_path
def normalize_data(data):
data = data - data.mean(axis=0)
data = data / data.std(axis=0)
return data
if __name__ == '__main__':
args = parser.parse_args()
assert args.name is not None and args.name != '', 'name of experiment must be specified'
# prepare data
folder = os.path.dirname(os.path.abspath(__file__))
preparation_funcs = {
'yaleb': prepare_data_YaleB,
'orl': prepare_data_orl,
'coil20': prepare_data_coil20,
'coil100': prepare_data_coil100}
assert args.dataset in preparation_funcs
Img, Label, n_input, n_hidden, kernel_size, n_sample_perclass, disc_size, k, post_alpha, all_subjects, model_path = \
preparation_funcs[args.dataset](folder)
Img = Img*args.imgmult
post_alpha = args.palpha or post_alpha
logs_path = os.path.join(folder, 'logs', args.name)
restore_path = model_path
# arrays for logging results
avg = []
med = []
# for each experiment setting, perform one loop
for n_class in all_subjects:
batch_size = n_class * n_sample_perclass
lambda1 = args.lambda1 # L2 sparsity on C
lambda2 = args.lambda2 # 0.2 # 1.0 * 10 ** (n_class / 10.0 - 3.0) # self-expressivity
lambda3 = args.lambda3 # discriminator gradient
# clear graph and build a new conv-AE
tf.reset_default_graph()
CAE = ConvAE(
args,
n_input, n_hidden, kernel_size, n_class, n_sample_perclass, disc_size,
lambda1, lambda2, lambda3, batch_size, r=args.r, rank=args.rank,
reg=tf.contrib.layers.l2_regularizer(tf.ones(1) * args.lambda4), disc_bound=args.bound,
model_path=model_path, restore_path=restore_path, logs_path=logs_path)
# perform optimization
avg_i, med_i = reinit_and_optimize(args, Img, Label, CAE, n_class, k=k, post_alpha=post_alpha)
# add result to list
avg.append(avg_i)
med.append(med_i)
# report results for all experiments
for i, n_class in enumerate(all_subjects):
print('%d subjects:' % n_class)
print('Mean: %.4f%%' % (avg[i] * 100), 'Median: %.4f%%' % (med[i] * 100))
|
mit
|
RapidApplicationDevelopment/tensorflow
|
tensorflow/examples/learn/multiple_gpu.py
|
13
|
3098
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
  # Create three fully connected layers respectively of size 10, 20, and 10,
  # each followed by dropout with a keep probability of 0.5.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(features, layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
galactics/beyond
|
beyond/dates/date.py
|
2
|
16364
|
# -*- coding: utf-8 -*-
"""Date module
"""
from numpy import sin, radians
from collections import namedtuple
from datetime import datetime, timedelta, date
from ..errors import DateError, UnknownScaleError
from .eop import EopDb
from ..utils.node import Node
__all__ = ["Date", "timedelta"]
class Timescale(Node):
"""Definition of a time scale and its interactions with others"""
def __repr__(self): # pragma: no cover
return "<Scale '%s'>" % self.name
def __str__(self):
return self.name
def _scale_ut1_minus_utc(self, mjd, eop):
"""Definition of Universal Time relatively to Coordinated Universal Time"""
return eop.ut1_utc
def _scale_tai_minus_utc(self, mjd, eop):
"""Definition of International Atomic Time relatively to Coordinated Universal Time"""
return eop.tai_utc
def _scale_tt_minus_tai(self, mjd, eop):
"""Definition of Terrestrial Time relatively to International Atomic Time"""
return 32.184
def _scale_tai_minus_gps(self, mjd, eop):
"""Definition of International Atomic Time relatively to GPS time"""
return 19.0
def _scale_tdb_minus_tt(self, mjd, eop):
"""Definition of the Barycentric Dynamic Time scale relatively to Terrestrial Time"""
jd = mjd + Date.JD_MJD
jj = Date._julian_century(jd)
m = radians(357.5277233 + 35999.05034 * jj)
delta_lambda = radians(246.11 + 0.90251792 * (jd - Date.J2000))
return 0.001657 * sin(m) + 0.000022 * sin(delta_lambda)
def offset(self, mjd, new_scale, eop):
"""Compute the offset necessary in order to convert from one time-scale to another
Args:
mjd (float):
new_scale (str): Name of the desired scale
Return:
float: offset to apply in seconds
"""
delta = 0
for one, two in self.steps(new_scale):
one = one.name.lower()
two = two.name.lower()
# find the operation
oper = "_scale_{}_minus_{}".format(two, one)
# find the reverse operation
roper = "_scale_{}_minus_{}".format(one, two)
if hasattr(self, oper):
delta += getattr(self, oper)(mjd, eop)
elif hasattr(self, roper):
delta -= getattr(self, roper)(mjd, eop)
else: # pragma: no cover
raise DateError("Unknown convertion {} => {}".format(one, two))
return delta
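# Illustrative use of Timescale.offset (added sketch; assumes EOP data is
# available through EopDb for the MJD in question):
#   eop = EopDb.get(mjd)
#   delta = UTC.offset(mjd, "TAI", eop)   # seconds to add to convert UTC -> TAI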
UT1 = Timescale("UT1") # Universal Time
GPS = Timescale("GPS") # GPS Time
TDB = Timescale("TDB") # Barycentric Dynamical Time
UTC = Timescale("UTC") # Coordinated Universal Time
TAI = Timescale("TAI") # International Atomic Time
TT = Timescale("TT") # Terrestrial Time
GPS + TAI + UTC + UT1
TDB + TT + TAI
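# The two expressions above chain the scales into a conversion graph
# (GPS - TAI - UTC - UT1 and TDB - TT - TAI); Timescale.offset converts
# between two scales by iterating over the pairwise steps of this chain.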
_cache = {"UT1": UT1, "GPS": GPS, "TDB": TDB, "UTC": UTC, "TAI": TAI, "TT": TT}
def get_scale(name):
if name in _cache.keys():
return _cache[name]
else:
raise UnknownScaleError(name)
class Date:
"""Date object
All computations and in-memory saving are made in
`MJD <https://en.wikipedia.org/wiki/Julian_day>`__ and
`TAI <https://en.wikipedia.org/wiki/International_Atomic_Time>`__.
In the current implementation, the Date object does not handle the
leap second.
The constructor can take:
* the same arguments as the standard library's datetime object (year, month, day, hour,
minute, second, microsecond)
* MJD as :py:class:`float`
* MJD as :py:class:`int` for days and :py:class:`float` for seconds
* a :py:class:`Date` or :py:class:`datetime` object
Keyword Arguments:
scale (str) : One of the following scales : "UT1", "UTC", "GPS", "TDB", "TAI", "TT"
Examples:
.. code-block:: python
Date(2016, 11, 17, 19, 16, 40)
Date(2016, 11, 17, 19, 16, 40, scale="TAI")
Date(57709.804455) # MJD
Date(57709, 69540.752649)
Date(datetime(2016, 11, 17, 19, 16, 40)) # built-in datetime object
Date.now()
Date objects interact with :py:class:`timedelta` as datetime do.
Attributes:
eop: Value of the Earth Orientation Parameters for this particular date (see
:ref:`eop`)
scale: Scale in which this date is represented
"""
__slots__ = ["_d", "_s", "_offset", "scale", "_cache", "eop"]
MJD_T0 = datetime(1858, 11, 17)
"""Origin of MJD"""
JD_MJD = 2400000.5
"""Offset between JD and MJD"""
J2000 = 2451545.0
"""Offset between JD and J2000"""
REF_SCALE = "TAI"
"""Scale used as reference internally"""
DEFAULT_SCALE = "UTC"
"""Default scale"""
def __init__(self, *args, scale=DEFAULT_SCALE, **kwargs):
if type(scale) is str:
scale = get_scale(scale.upper())
if len(args) == 1:
arg = args[0]
if isinstance(arg, datetime):
# Python datetime.datetime object
d, s = self._convert_dt(arg)
elif isinstance(arg, Date):
# Date object
d = arg.d
s = arg.s
scale = arg.scale
elif isinstance(arg, (float, int)):
# Modified Julian Day
if isinstance(arg, int):
d = arg
s = 0.0
else:
d = int(arg)
s = (arg - d) * 86400
else:
raise TypeError("Unknown type '{}'".format(type(arg)))
elif len(args) == 2 and (
isinstance(args[0], int) and isinstance(args[1], (int, float))
):
# Julian day and seconds in the day
d, s = args
elif len(args) in range(3, 8) and list(map(type, args)) == [int] * len(args):
# Same constructor as datetime.datetime
# (year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
dt = datetime(*args, **kwargs)
d, s = self._convert_dt(dt)
else:
raise TypeError(
"Unknown type sequence {}".format(", ".join(str(type(x)) for x in args))
)
mjd = d + s / 86400.0
# Retrieve EOP for the given date and store
eop = EopDb.get(mjd)
# Retrieve the offset from REF_SCALE for the current date
offset = scale.offset(mjd, self.REF_SCALE, eop)
d += int((s + offset) // 86400)
s = (s + offset) % 86400.0
# As Date acts like an immutable object, we can't set its attributes normally
# like when we do ``self._d = _d``. Furthermore, those attribute represent the date with
# respect to REF_SCALE
super().__setattr__("_d", d)
super().__setattr__("_s", s)
super().__setattr__("_offset", offset)
super().__setattr__("scale", scale)
super().__setattr__("eop", eop)
super().__setattr__("_cache", {})
def __getstate__(self): # pragma: no cover
"""Used for pickling"""
return {
"d": self._d,
"s": self._s,
"offset": self._offset,
"scale": self.scale,
"eop": self.eop,
}
def __setstate__(self, state): # pragma: no cover
"""Used for unpickling"""
super().__setattr__("_d", state["d"])
super().__setattr__("_s", state["s"])
super().__setattr__("_offset", state["offset"])
super().__setattr__("scale", state["scale"])
super().__setattr__("eop", state["eop"])
super().__setattr__("_cache", {})
def __setattr__(self, *args): # pragma: no cover
raise TypeError("Can not modify attributes of immutable object")
def __delattr__(self, *args): # pragma: no cover
raise TypeError("Can not modify attributes of immutable object")
def __add__(self, other):
if isinstance(other, timedelta):
days, sec = divmod(other.total_seconds() + self.s, 86400)
else:
raise TypeError("Unknown operation with {}".format(type(other)))
return self.__class__(self.d + int(days), sec, scale=self.scale)
def __sub__(self, other):
if isinstance(other, timedelta):
other = timedelta(seconds=-other.total_seconds())
elif isinstance(other, datetime):
return self.datetime - other
elif isinstance(other, Date):
return self._datetime - other._datetime
else:
raise TypeError("Unknown operation with {}".format(type(other)))
return self.__add__(other)
def __gt__(self, other):
return self._mjd > other._mjd
def __ge__(self, other):
return self._mjd >= other._mjd
def __lt__(self, other):
return self._mjd < other._mjd
def __le__(self, other):
return self._mjd <= other._mjd
def __eq__(self, other):
return self._mjd == other._mjd
def __repr__(self): # pragma: no cover
return "<{} '{}'>".format(self.__class__.__name__, self)
def __str__(self): # pragma: no cover
if "str" not in self._cache.keys():
self._cache["str"] = "{} {}".format(self.datetime.isoformat(), self.scale)
return self._cache["str"]
def __format__(self, fmt): # pragma: no cover
if fmt:
return self.datetime.__format__(fmt)
else:
return str(self)
def __hash__(self):
return hash((self._d, self._s))
@classmethod
def _convert_dt(cls, dt):
if dt.tzinfo is None:
delta = dt - cls.MJD_T0
else:
tz = dt.utcoffset()
delta = dt.replace(tzinfo=None) - cls.MJD_T0 - tz
return delta.days, delta.seconds + delta.microseconds * 1e-6
def _convert_to_scale(self):
"""Convert the inner value (defined with respect to REF_SCALE) into the given scale
of the object
"""
d = self._d
s = (self._s - self._offset) % 86400.0
d -= int((s + self._offset) // 86400)
return d, s
@property
def d(self):
return self._convert_to_scale()[0]
@property
def s(self):
return self._convert_to_scale()[1]
@property
def datetime(self):
"""Conversion of the Date object into a ``datetime.datetime``
The resulting object is a timezone-naive instance with the same scale
as the originating Date object.
"""
if "dt_scale" not in self._cache.keys():
self._cache["dt_scale"] = self._datetime - timedelta(seconds=self._offset)
return self._cache["dt_scale"]
@property
def _datetime(self):
"""Conversion of the Date object into a :py:class:`datetime.datetime`.
The resulting object is a timezone-naive instance in the REF_SCALE time-scale
"""
if "dt" not in self._cache.keys():
self._cache["dt"] = self.MJD_T0 + timedelta(days=self._d, seconds=self._s)
return self._cache["dt"]
@classmethod
def strptime(cls, data, format, scale=DEFAULT_SCALE): # pragma: no cover
"""Convert a string representation of a date to a Date object"""
return cls(datetime.strptime(data, format), scale=scale)
@classmethod
def now(cls, scale=DEFAULT_SCALE):
"""
Args:
scale (str)
Return:
Date: Current time in the chosen scale
"""
return cls(datetime.utcnow()).change_scale(scale)
def strftime(self, fmt): # pragma: no cover
"""Format the date following the given format"""
return self.datetime.strftime(fmt)
def change_scale(self, new_scale):
"""
Args:
new_scale (str)
Return:
Date
"""
offset = self.scale.offset(self._mjd, new_scale, self.eop)
result = self.datetime + timedelta(seconds=offset)
return self.__class__(result, scale=new_scale)
@classmethod
def _julian_century(cls, jd):
return (jd - cls.J2000) / 36525.0
@property
def julian_century(self):
"""Compute the julian_century of the Date object relatively to its
scale
Return:
float
"""
return self._julian_century(self.jd)
@property
def jd(self):
"""Compute the Julian Date, which is the number of days from the
January 1, 4712 B.C., 12:00.
Return:
float
"""
return self.mjd + self.JD_MJD
@property
def _mjd(self):
"""
Return:
float: Date in terms of MJD in the REF_SCALE timescale
"""
return self._d + self._s / 86400.0
@property
def mjd(self):
"""Date in terms of MJD
Return:
float
"""
return self.d + self.s / 86400.0
@classmethod
def range(cls, start=None, stop=None, step=None, inclusive=False):
return DateRange(start, stop, step, inclusive)
@classmethod
def _range(cls, start=None, stop=None, step=None, inclusive=False):
"""Generator of a date range
Args:
start (Date):
stop (Date or datetime.timedelta):
step (timedelta):
Keyword Args:
inclusive (bool): If ``False``, the stopping date is not included.
This is the same behavior as the built-in :py:func:`range`.
Yield:
Date:
"""
def sign(x):
"""Inner function for determining the sign of a float"""
return (-1, 1)[x >= 0]
if not step:
raise ValueError("Null step")
# Convert stop from timedelta to Date object
if isinstance(stop, timedelta):
stop = start + stop
if sign((stop - start).total_seconds()) != sign(step.total_seconds()):
raise ValueError("start/stop order not coherent with step")
date = start
if step.total_seconds() > 0:
oper = "__le__" if inclusive else "__lt__"
else:
oper = "__ge__" if inclusive else "__gt__"
while getattr(date, oper)(stop):
yield date
date += step
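# Illustrative usage of Date.range (added sketch; assumes EOP data is configured):
#   start = Date(2020, 1, 1)
#   for d in Date.range(start, timedelta(days=3), timedelta(hours=12)):
#       print(d)   # 6 dates, the stop date excluded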
class DateRange:
"""Object representing a Date.range call
    Allow for manipulation of the range before any computation
"""
_descriptor = namedtuple("range_descriptor", "start stop step inclusive")
def __init__(self, start, stop, step, inclusive):
"""
Args:
start (Date):
stop (Date or datetime.timedelta):
step (timedelta):
inclusive (bool): If ``False``, the stopping date is not included.
This is the same behavior as the built-in :py:func:`range`.
"""
if isinstance(stop, timedelta):
stop = start + stop
self._range = self._descriptor(start, stop, step, inclusive)
def __iter__(self):
for d in Date._range(*self._range):
yield d
def __contains__(self, date):
return self.start <= date <= self.stop
@property
def start(self):
return self._range.start
@property
def stop(self):
return self._range.stop
@property
def step(self):
return self._range.step
# This part is here to allow matplotlib to display Date objects directly
# in the plot, without any other conversion by the developer
# If matplotlib is importable, then a converter class is registered
# for converting all Date objects on the fly
try:
import matplotlib.dates as mdates
import matplotlib.units as munits
except ImportError: # pragma: no cover
pass
else: # pragma: no cover
class DateConverter(mdates.DateConverter):
@staticmethod
def convert(values, unit, axis):
try:
iter(values)
except TypeError:
if isinstance(values, (datetime, date)):
values = mdates.date2num(values)
else:
values = mdates.date2num(values.datetime)
else:
values = [mdates.date2num(v.datetime) for v in values]
return values
munits.registry.setdefault(Date, DateConverter())
munits.registry.setdefault(DateRange, DateConverter())
|
mit
|
mikeengland/fireant
|
fireant/tests/dataset/test_pandas_workaround.py
|
2
|
2043
|
from unittest import TestCase
import numpy as np
import pandas as pd
from fireant.queries.pandas_workaround import df_subtract
class TestSubtract(TestCase):
def test_subtract_partially_aligned_multi_index_dataframes_with_nans(self):
df0 = pd.DataFrame(
data=[
[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
[11, 12],
[13, 14],
[15, 16],
[17, 18],
],
columns=["happy", "sad"],
index=pd.MultiIndex.from_product([["a", "b", None], [0, 1, np.nan]], names=["l0", "l1"]),
)
df1 = pd.DataFrame(
data=[
[1, 2],
[3, 4],
[5, 6],
[7, 8],
[9, 10],
[11, 12],
[13, 14],
[15, 16],
[17, 18],
],
columns=["happy", "sad"],
index=pd.MultiIndex.from_product([["b", "c", None], [1, 2, np.nan]], names=["l0", "l1"]),
)
result = df_subtract(df0, df1, fill_value=0)
expected = pd.DataFrame.from_records(
[
["a", 0, 1 - 0, 2 - 0],
["a", 1, 3 - 0, 4 - 0],
["a", np.nan, 5 - 0, 6 - 0],
["b", 0, 7 - 0, 8 - 0],
["b", 1, 9 - 1, 10 - 2],
["b", np.nan, 11 - 5, 12 - 6],
[np.nan, 0, 13 - 0, 14 - 0],
[np.nan, 1, 15 - 13, 16 - 14],
[np.nan, np.nan, 17 - 17, 18 - 18],
["b", 2, 0 - 3, 0 - 4],
["c", 1, 0 - 7, 0 - 8],
["c", 2, 0 - 9, 0 - 10],
["c", np.nan, 0 - 11, 0 - 12],
[np.nan, 2, 0 - 15, 0 - 16],
],
columns=["l0", "l1", "happy", "sad"],
).set_index(["l0", "l1"])
pd.testing.assert_frame_equal(expected, result)
self.assertTrue(result.index.is_unique)
|
apache-2.0
|
saurabhjn76/sympy
|
examples/intermediate/mplot3d.py
|
93
|
1252
|
#!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
|
bsd-3-clause
|
florentchandelier/zipline
|
zipline/__main__.py
|
1
|
12454
|
import errno
import os
from importlib import import_module
from functools import wraps
import click
import logbook
import pandas as pd
from six import text_type
import pkgutil
from zipline.data import bundles as bundles_module
from zipline.utils.cli import Date, Timestamp
from zipline.utils.run_algo import _run, load_extensions
from zipline.gens import brokers
try:
__IPYTHON__
except NameError:
__IPYTHON__ = False
@click.group()
@click.option(
'-e',
'--extension',
multiple=True,
help='File or module path to a zipline extension to load.',
)
@click.option(
'--strict-extensions/--non-strict-extensions',
is_flag=True,
help='If --strict-extensions is passed then zipline will not run if it'
' cannot load all of the specified extensions. If this is not passed or'
' --non-strict-extensions is passed then the failure will be logged but'
' execution will continue.',
)
@click.option(
'--default-extension/--no-default-extension',
is_flag=True,
default=True,
help="Don't load the default zipline extension.py file in $ZIPLINE_HOME.",
)
def main(extension, strict_extensions, default_extension):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
)
def extract_option_object(option):
"""Convert a click.option call into a click.Option object.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
option_object : click.Option
The option object that this decorator will create.
"""
@option
def opt():
pass
return opt.__click_params__[0]
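# Illustrative usage (added): recover the click.Option behind a click.option
# decorator, which is how ipython_only below reads the option's name.
#   opt = extract_option_object(click.option('--local-namespace', is_flag=True))
#   opt.name  # -> 'local_namespace'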
def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d
@main.command()
@click.option(
'-f',
'--algofile',
default=None,
type=click.File('r'),
help='The file that contains the algorithm to run.',
)
@click.option(
'-t',
'--algotext',
help='The algorithm script to run.',
)
@click.option(
'-D',
'--define',
multiple=True,
help="Define a name to be bound in the namespace before executing"
" the algotext. For example '-Dname=value'. The value may be any python"
" expression. These are evaluated in order so they may refer to previously"
" defined names.",
)
@click.option(
'--data-frequency',
type=click.Choice({'daily', 'minute'}),
default='daily',
show_default=True,
help='The data frequency of the simulation.',
)
@click.option(
'--local-benchmark',
default=None,
help='Use path to get local benchmark csv file.',
)
@click.option(
'--capital-base',
type=float,
default=10e6,
show_default=True,
help='The starting capital for the simulation.',
)
@click.option(
'-b',
'--bundle',
default='quantopian-quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to use for the simulation.',
)
@click.option(
'--bundle-timestamp',
type=Timestamp(),
default=pd.Timestamp.utcnow(),
show_default=False,
help='The date to lookup data on or before.\n'
'[default: <current-time>]'
)
@click.option(
'-s',
'--start',
type=Date(tz='utc', as_timestamp=True),
help='The start date of the simulation.',
)
@click.option(
'-e',
'--end',
type=Date(tz='utc', as_timestamp=True),
help='The end date of the simulation.',
)
@click.option(
'-o',
'--output',
default='-',
metavar='FILENAME',
show_default=True,
help="The location to write the perf data. If this is '-' the perf will"
" be written to stdout.",
)
@click.option(
'--print-algo/--no-print-algo',
is_flag=True,
default=False,
help='Print the algorithm to stdout.',
)
@ipython_only(click.option(
'--local-namespace/--no-local-namespace',
is_flag=True,
default=None,
help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
'--broker',
default=None,
help='Broker'
)
@click.option(
'--broker-uri',
default=None,
metavar='BROKER-URI',
show_default=True,
help='Connection to broker',
)
@click.option(
'--account-id',
default='',
metavar='ACT-ID',
help='Account ID to trade on from a single sign-on (SSO) for consolidated/individual or linked/advisor accounts',
)
@click.option(
'--state-file',
default=None,
metavar='FILENAME',
help='Filename where the state will be stored'
)
@click.option(
'--realtime-bar-target',
default=None,
metavar='DIRNAME',
help='Directory where the realtime collected minutely bars are saved'
)
@click.option(
'--list-brokers',
is_flag=True,
help='Get list of available brokers'
)
@click.pass_context
def run(ctx,
algofile,
algotext,
define,
data_frequency,
local_benchmark,
capital_base,
bundle,
bundle_timestamp,
start,
end,
output,
print_algo,
local_namespace,
broker,
broker_uri,
account_id,
state_file,
realtime_bar_target,
list_brokers):
"""Run a backtest for the given algorithm.
"""
if list_brokers:
click.echo("Supported brokers:")
for _, name, _ in pkgutil.iter_modules(brokers.__path__):
if name != 'broker':
click.echo(name)
return
# check that the start and end dates are passed correctly
if not broker and start is None and end is None:
# check both at the same time to avoid the case where a user
# does not pass either of these and then passes the first only
# to be told they need to pass the second argument also
ctx.fail(
"must specify dates with '-s' / '--start' and '-e' / '--end'",
)
if not broker and start is None:
ctx.fail("must specify a start date with '-s' / '--start'")
if not broker and end is None:
ctx.fail("must specify an end date with '-e' / '--end'")
if broker and broker_uri is None:
ctx.fail("must specify broker-uri if broker is specified")
if broker and state_file is None:
ctx.fail("must specify state-file with live trading")
if broker and realtime_bar_target is None:
ctx.fail("must specify realtime-bar-target with live trading")
brokerobj = None
if broker:
mod_name = 'zipline.gens.brokers.%s_broker' % broker.lower()
try:
bmod = import_module(mod_name)
except ImportError:
ctx.fail("unsupported broker: can't import module %s" % mod_name)
cl_name = '%sBroker' % broker.upper()
try:
bclass = getattr(bmod, cl_name)
except AttributeError:
ctx.fail("unsupported broker: can't import class %s from %s" %
(cl_name, mod_name))
    if account_id == '':
brokerobj = bclass(broker_uri)
else:
brokerobj = bclass(broker_uri, account_id)
if (algotext is not None) == (algofile is not None):
ctx.fail(
"must specify exactly one of '-f' / '--algofile' or"
" '-t' / '--algotext'",
)
perf = _run(
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
algofile=algofile,
algotext=algotext,
defines=define,
data_frequency=data_frequency,
local_benchmark=local_benchmark,
capital_base=capital_base,
data=None,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=output,
print_algo=print_algo,
local_namespace=local_namespace,
environ=os.environ,
broker=brokerobj,
account_id=account_id,
state_filename=state_file,
realtime_bar_target=realtime_bar_target
)
if output == '-':
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
def zipline_magic(line, cell=None):
"""The zipline IPython cell magic.
"""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
'--algotext', cell,
'--output', os.devnull, # don't write the results by default
] + ([
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
'--algotext', '',
'--local-namespace',
] if cell is None else []) + line.split(),
'%s%%zipline' % ((cell or '') and '%'),
            # don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError('main returned non-zero status code: %d' % e.code)
@main.command()
@click.option(
'-b',
'--bundle',
default='quantopian-quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to ingest.',
)
@click.option(
'--assets-version',
type=int,
multiple=True,
help='Version of the assets db to which to downgrade.',
)
@click.option(
'--show-progress/--no-show-progress',
default=True,
help='Print progress information to the terminal.'
)
def ingest(bundle, assets_version, show_progress):
"""Ingest the data for the given bundle.
"""
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
)
@main.command()
@click.option(
'-b',
'--bundle',
default='quantopian-quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to clean.',
)
@click.option(
'-e',
'--before',
type=Timestamp(),
help='Clear all data before TIMESTAMP.'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-a',
'--after',
type=Timestamp(),
help='Clear all data after TIMESTAMP'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-k',
'--keep-last',
type=int,
metavar='N',
help='Clear all but the last N downloads.'
' This may not be passed with -e / --before or -a / --after',
)
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
@main.command()
def bundles():
"""List all of the available data bundles.
"""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith('.'):
# hide the test data
continue
try:
ingestions = list(
map(text_type, bundles_module.ingestions_for_bundle(bundle))
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp))
if __name__ == '__main__':
main()
|
apache-2.0
|
andrewnc/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
amanzi/ats-dev
|
tools/utils/colors.py
|
2
|
6529
|
import matplotlib
import matplotlib.colors
import matplotlib.cm
import numpy as np
#
# Lists of disparate color palettes
#
enumerated_palettes = {
1 : ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#ffff33','#a65628','#f781bf','#999999'],
2 : ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6',
'#6a3d9a','#ffff99','#b15928'],
3 : ['#1b9e77','#d95f02','#7570b3','#e7298a','#66a61e','#e6ab02','#a6761d','#666666'],
}
def enumerated_colors(count, palette=1, chain=True):
"""Gets an enumerated list of count independent colors."""
p = enumerated_palettes[palette]
if count <= len(p):
return p[0:count]
else:
for p in enumerated_palettes.values():
if count <= len(p):
return p[0:count]
if chain:
# must chain...
p = enumerated_palettes[palette]
def chain_iter(p):
while True:
for c in p:
yield c
return [c for (i,c) in zip(range(count),chain_iter(p))]
else:
raise ValueError("No enumerated palettes of length {}.".format(count))
#
# Lists of diverging color palettes
#
def sampled_colors(count, cmap):
"""Gets a list of count colors sampled from a colormap."""
cm = cm_mapper(0,count-1,cmap)
return [cm(i) for i in range(count)]
# black-zero jet is jet, but with the 0-value set to black, with an immediate jump to blue
def blackzerojet_cmap(data):
blackzerojet_dict = {'blue': [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.5],
[0.11, 1, 1],
[0.34000000000000002, 1, 1],
[0.65000000000000002, 0, 0],
[1, 0, 0]],
'green': [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.125, 0, 0],
[0.375, 1, 1],
[0.64000000000000001, 1, 1],
[0.91000000000000003, 0, 0],
[1, 0, 0]],
'red': [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.34999999999999998, 0, 0],
[0.66000000000000003, 1, 1],
[0.89000000000000001, 1, 1],
[1, 0.5, 0.5]]
}
minval = data[np.where(data > 0.)[0]].min(); print(minval)
maxval = data[np.where(data > 0.)[0]].max(); print(maxval)
oneminval = .9*minval/maxval
for color in ['blue', 'green', 'red']:
for i in range(1,len(blackzerojet_dict[color])):
blackzerojet_dict[color][i][0] = blackzerojet_dict[color][i][0]*(1-oneminval) + oneminval
return matplotlib.colors.LinearSegmentedColormap('blackzerojet', blackzerojet_dict)
# ice color map
def ice_cmap():
x = np.linspace(0,1,7)
b = np.array([1,1,1,1,1,0.8,0.6])
g = np.array([1,0.993,0.973,0.94,0.893,0.667,0.48])
r = np.array([1,0.8,0.6,0.5,0.2,0.,0.])
bb = np.array([x,b,b]).transpose()
gg = np.array([x,g,g]).transpose()
rr = np.array([x,r,r]).transpose()
ice_dict = {'blue': bb, 'green': gg, 'red': rr}
return matplotlib.colors.LinearSegmentedColormap('ice', ice_dict)
# water color map
def water_cmap():
x = np.linspace(0,1,8)
b = np.array([1.0, 1.0, 1.0, 1.0, 0.8, 0.6, 0.4, 0.2])
g = np.array([1.0, 0.8, 0.6, 0.4, 0.2, 0.0, 0.0, 0.0])
r = np.array([1.0, 0.7, 0.5, 0.3, 0.1, 0.0, 0.0, 0.0])
bb = np.array([x,b,b]).transpose()
gg = np.array([x,g,g]).transpose()
rr = np.array([x,r,r]).transpose()
water_dict = {'blue': bb, 'green': gg, 'red': rr}
return matplotlib.colors.LinearSegmentedColormap('water', water_dict)
# gas color map
def gas_cmap():
x = np.linspace(0,1,8)
r = np.array([1.0, 1.0, 1.0, 1.0, 0.8, 0.6, 0.4, 0.2])
# g = np.array([1.0, 0.8, 0.6, 0.4, 0.2, 0.0, 0.0, 0.0])
b = np.array([1.0, 0.6, 0.4, 0.2, 0.0, 0.0, 0.0, 0.0])
g = np.array([1.0, 0.6, 0.4, 0.2, 0.0, 0.0, 0.0, 0.0])
bb = np.array([x,b,b]).transpose()
gg = np.array([x,g,g]).transpose()
rr = np.array([x,r,r]).transpose()
gas_dict = {'blue': bb, 'green': gg, 'red': rr}
return matplotlib.colors.LinearSegmentedColormap('gas', gas_dict)
# jet-by-index
def cm_mapper(vmin=0., vmax=1., cmap=matplotlib.cm.jet):
"""Create a map from value to color given a colormap.
Typical Usage:
>>> # plots 11 lines, with color scaled by index into jet
>>> mapper = cm_mapper(vmin=0, vmax=10, cmap=matplotlib.cm.jet)
>>> for i in range(11):
... data = np.load('data_%03d.npy'%i)
... plt.plot(x, data, color=mapper(i))
...
>>> plt.show()
"""
norm = matplotlib.colors.Normalize(vmin, vmax)
sm = matplotlib.cm.ScalarMappable(norm, cmap)
def mapper(value):
return sm.to_rgba(value)
return mapper
def alpha_cmap(color, flip=False):
"""Create a map from value to color, using a colormap that varies alpha in a given color."""
color = matplotlib.colors.to_rgba(color)
color_str = matplotlib.colors.to_hex(color)
color_list = [(color[0],color[1],color[2],0.1),
(color[0],color[1],color[2],1)]
if flip:
color_list = reversed(color_list)
return matplotlib.colors.LinearSegmentedColormap.from_list('alpha_{}'.format(color_str),
color_list)
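# Illustrative usage (added): a colormap ramping from nearly transparent to
# opaque blue, e.g. for overlaying data on a map.
#   cmap = alpha_cmap('blue')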
def float_list_type(mystring):
"""Convert string-form list of doubles into list of doubles."""
colors = []
for f in mystring.strip("(").strip(")").strip("[").strip("]").split(","):
try:
colors.append(float(f))
except:
colors.append(f)
return colors
def desaturate(color, amount=0.4, is_hsv=False):
if not is_hsv:
hsv = matplotlib.colors.rgb_to_hsv(matplotlib.colors.to_rgb(color))
else:
hsv = color
print(hsv)
hsv[1] = max(0,hsv[1] - amount)
return matplotlib.colors.hsv_to_rgb(hsv)
def darken(color, fraction=0.6):
rgb = np.array(matplotlib.colors.to_rgb(color))
return tuple(np.maximum(rgb - fraction*rgb,0))
def lighten(color, fraction=0.6):
rgb = np.array(matplotlib.colors.to_rgb(color))
return tuple(np.minimum(rgb + fraction*(1-rgb),1))
|
bsd-3-clause
|
jchodera/openmoltools
|
openmoltools/tests/test_openeye.py
|
2
|
11798
|
from nose.plugins.attrib import attr
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
import numpy as np
from mdtraj.testing import eq
from unittest import skipIf
from openmoltools import utils, packmol
import os
import openmoltools.openeye
import pandas as pd
import mdtraj as md
from mdtraj.testing import raises
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(=C=[N-])C=[NH+]3"
try:
oechem = utils.import_("openeye.oechem")
if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
oequacpac = utils.import_("openeye.oequacpac")
if not oequacpac.OEQuacPacIsLicensed(): raise(ImportError("Need License for oequacpac!"))
oeiupac = utils.import_("openeye.oeiupac")
    if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEIupac!"))
oeomega = utils.import_("openeye.oeomega")
if not oeomega.OEOmegaIsLicensed(): raise(ImportError("Need License for OEOmega!"))
HAVE_OE = True
openeye_exception_message = str()
except Exception as e:
HAVE_OE = False
openeye_exception_message = str(e)
try:
import parmed
HAVE_PARMED = True
except ImportError:
HAVE_PARMED = False
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_keepconfs():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0, keep_confs=1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_unnormalized():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m0.SetTitle("MyCustomTitle")
m1 = openmoltools.openeye.get_charges(m0, normalize=False, keep_confs=1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
assert m0.GetTitle() == m1.GetTitle(), "The title of the molecule should not be changed by normalization."
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_butanol():
m0 = openmoltools.openeye.iupac_to_oemol("butanol")
m1 = openmoltools.openeye.get_charges(m0, keep_confs=-1)
eq(m0.NumAtoms(), m1.NumAtoms())
assert m1.NumConfs() >= 2, "Butanol should have multiple conformers."
assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
all_data = {}
for k, molecule in enumerate(m1.GetConfs()):
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(molecule)
all_data[k] = names_to_charges
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
# Build a table of charges indexed by conformer number and atom name
all_data = pd.DataFrame(all_data)
# The standard deviation along the conformer axis should be zero if all conformers have same charges
eq(all_data.std(1).values, np.zeros(m1.NumAtoms()), decimal=7)
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0 # An object containing the charges, with atom names as indices
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_benzene():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
eq(m0.NumAtoms(), m1.NumAtoms())
print(m1.NumConfs())
assert m1.NumConfs() == 1, "Benezene should have 1 conformer"
assert m1.NumAtoms() == 12, "Benezene should have 12 atoms"
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
eq(sum(names_to_charges.values()), 0.0, decimal=7) # Net charge should be zero
with utils.enter_temp_directory():
# Try saving to disk as mol2
openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
# Make sure MDTraj can read the output
t = md.load("out.mol2")
# Make sure MDTraj can read the charges / topology info
atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
# Finally, make sure MDTraj and OpenEye report the same charges.
names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
q = atoms.set_index("name").charge
q0 = pd.Series(names_to_charges)
delta = q - q0 # An object containing the charges, with atom names as indices
eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_link_in_utils():
m0 = openmoltools.openeye.iupac_to_oemol("benzene")
m1 = openmoltools.openeye.get_charges(m0)
with utils.enter_temp_directory():
# This function was moved from utils to openeye, so check that the old link still works.
utils.molecule_to_mol2(m1, "out.mol2")
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_smiles():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.iupac_to_oemol("butanol")
charged1 = openmoltools.openeye.get_charges(m1)
eq(charged0.NumAtoms(), charged1.NumAtoms())
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml():
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("ClC(Cl)(Cl)Cl")
charged1 = openmoltools.openeye.get_charges(m1)
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml([charged0, charged1])
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml_simulation():
"""Test converting toluene and benzene smiles to oemol to ffxml to openmm simulation."""
with utils.enter_temp_directory():
m0 = openmoltools.openeye.smiles_to_oemol("Cc1ccccc1")
charged0 = openmoltools.openeye.get_charges(m0)
m1 = openmoltools.openeye.smiles_to_oemol("c1ccccc1")
charged1 = openmoltools.openeye.get_charges(m1)
ligands = [charged0, charged1]
n_atoms = [15,12]
trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml(ligands)
eq(len(trajectories),len(ligands))
pdb_filename = utils.get_data_filename("chemicals/proteins/1vii.pdb")
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.01 * u.femtosecond
protein_traj = md.load(pdb_filename)
protein_traj.center_coordinates()
protein_top = protein_traj.top.to_openmm()
protein_xyz = protein_traj.openmm_positions(0)
for k, ligand in enumerate(ligands):
ligand_traj = trajectories[k]
ligand_traj.center_coordinates()
eq(ligand_traj.n_atoms, n_atoms[k])
eq(ligand_traj.n_frames, 1)
#Move the pre-centered ligand sufficiently far away from the protein to avoid a clash.
min_atom_pair_distance = ((ligand_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + ((protein_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + 0.3
ligand_traj.xyz += np.array([1.0, 0.0, 0.0]) * min_atom_pair_distance
ligand_xyz = ligand_traj.openmm_positions(0)
ligand_top = ligand_traj.top.to_openmm()
ffxml.seek(0)
forcefield = app.ForceField("amber10.xml", ffxml, "tip3p.xml")
model = app.modeller.Modeller(protein_top, protein_xyz)
model.add(ligand_top, ligand_xyz)
model.addSolvent(forcefield, padding=0.4 * u.nanometer)
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * u.nanometers, constraints=app.HAngles)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(model.positions)
print("running")
simulation.step(1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@raises(RuntimeError)
def test_charge_fail1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=True)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@raises(RuntimeError)
def test_charge_fail2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, keep_confs=1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@skipIf(not HAVE_PARMED, "Cannot test without Parmed Chemistry.")
@skipIf(packmol.PACKMOL_PATH is None, "Skipping testing of packmol conversion because packmol not found.")
@attr("parmed")
def test_binary_mixture_rename():
smiles_string0 = "CCCCCC"
smiles_string1 = "CCCCCCCCC"
with utils.enter_temp_directory(): # Prevents creating tons of GAFF files everywhere.
mol2_filename0 = "./A.mol2"
frcmod_filename0 = "./A.frcmod"
mol2_filename1 = "./B.mol2"
frcmod_filename1 = "./B.frcmod"
gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
frcmod_filenames = [frcmod_filename0, frcmod_filename1]
prmtop_filename = "./box.prmtop"
inpcrd_filename = "./box.inpcrd"
openmoltools.openeye.smiles_to_antechamber(smiles_string0, mol2_filename0, frcmod_filename0)
openmoltools.openeye.smiles_to_antechamber(smiles_string1, mol2_filename1, frcmod_filename1)
openmoltools.utils.randomize_mol2_residue_names(gaff_mol2_filenames)
box_pdb_filename = "./box.pdb"
gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
n_monomers = [10, 20]
packed_trj = packmol.pack_box([md.load(mol2) for mol2 in gaff_mol2_filenames], n_monomers)
packed_trj.save(box_pdb_filename)
tleap_cmd = openmoltools.amber.build_mixture_prmtop(gaff_mol2_filenames, frcmod_filenames, box_pdb_filename, prmtop_filename, inpcrd_filename)
prmtop = app.AmberPrmtopFile(prmtop_filename)
inpcrd = app.AmberInpcrdFile(inpcrd_filename)
system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1.0*u.nanometers, constraints=app.HBonds)
|
gpl-2.0
|
hdmetor/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
12
|
10796
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
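# Illustrative sketch (not part of the original test suite): a minimal
# dump/load round-trip with the public svmlight helpers exercised above,
# using only names already imported at the top of this module.
def _example_svmlight_roundtrip():
    X = np.array([[1.0, 0.0, 2.5], [0.0, 3.0, 0.0]])
    y = np.array([0, 1])
    f = BytesIO()
    # write the dense matrix in svmlight/libsvm format to an in-memory buffer
    dump_svmlight_file(X, y, f, zero_based=True)
    f.seek(0)
    # read it back and check the round-trip preserved the data
    X2, y2 = load_svmlight_file(f, zero_based=True)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)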
|
bsd-3-clause
|
dimroc/tensorflow-mnist-tutorial
|
lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
5
|
55320
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
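# Illustrative sketch (not in the original file) of the conversion described
# in SCIKIT_DECOUPLE_INSTRUCTIONS above; `my_model_fn`, `x_train` and
# `y_train` are hypothetical placeholders.
#
#   est = SKCompat(Estimator(model_fn=my_model_fn))
#   est.fit(x_train, y_train, batch_size=128, steps=1000)
#   scores = est.score(x_train, y_train)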
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
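# Illustrative sketch (not in the original file) of an `input_fn` of the form
# described above; the feature name 'x' and the literal values are hypothetical.
#
#   def my_input_fn():
#     features = {'x': ops.convert_to_tensor([[1.0], [2.0]])}
#     labels = ops.convert_to_tensor([[0.0], [1.0]])
#     return features, labels
#
#   feature_columns = infer_real_valued_columns_from_input_fn(my_input_fn)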
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of
    calling the given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Typically this occurs
      when a dict is given but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
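# Illustrative sketch (not in the original file) of a `metrics` dict as
# described above, built from names imported in this module; the prediction
# key 'classes' is a hypothetical example.
#
#   metrics = {
#       'accuracy': metric_spec.MetricSpec(
#           metric_fn=metrics_lib.streaming_accuracy,
#           prediction_key='classes'),
#   }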
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Concrete implementation of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
`Estimator` implemented below is a good example of how to use this class.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info(
              'Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can either
    implement iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long
    time to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
        Or if `metrics` is neither `None` nor a `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
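  # Illustrative sketch (not in the original file): consuming predictions as an
  # iterable; `my_estimator`, `my_input_fn` and `process` are hypothetical.
  #
  #   for prediction in my_estimator.predict(input_fn=my_input_fn,
  #                                          as_iterable=True):
  #     process(prediction)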
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated_arg_values(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
'input_fn (and in most cases, input_feature_key) will become required '
'args, and use_deprecated_input_fn will default to False and be removed '
'altogether.',
use_deprecated_input_fn=True,
input_fn=None)
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
# The default return type of _get_eval_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_eval_ops returns an
# `eval_dict` dictionary of Tensors. The following else-statement code
# covers these cases, but will soon be deleted after the subclasses are
# updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
eval_ops = self._get_eval_ops(features, labels, metrics)
if isinstance(eval_ops, model_fn_lib.ModelFnOps): # Default signature
eval_dict = eval_ops.eval_metric_ops
else: # Legacy signature
eval_dict = eval_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
hooks = hooks or []
if feed_fn:
hooks.append(_FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._call_legacy_get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._call_legacy_get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.training_scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self.config.tf_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _call_legacy_get_predict_ops(self, features):
# The default return type of _get_predict_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_predict_ops returns a
# `predictions` Tensor or dict or Tensors. The following else-statement
# code covers these cases, but will soon be deleted after the subclasses
# are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
infer_ops = self._get_predict_ops(features)
if isinstance(infer_ops, model_fn_lib.ModelFnOps): # Default signature
return infer_ops
return model_fn_lib.ModelFnOps(
mode=model_fn_lib.ModeKeys.INFER, predictions=infer_ops)
def _call_legacy_get_train_ops(self, features, labels):
train_ops = self._get_train_ops(features, labels)
if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature
return train_ops
return model_fn_lib.ModelFnOps(
mode=model_fn_lib.ModeKeys.TRAIN,
predictions=None,
loss=train_ops[1],
train_op=train_ops[0])
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
        * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
              is passed to Estimator in `params` parameter. This allows
              the Estimator to be configured through hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
        * `model_dir`: Optional directory where model parameters, graph, etc.
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
'arguments, but not None params (%s) are passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_ops should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
@experimental
def export_savedmodel(
self, export_dir_base, input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
exports_to_keep: Number of exports to keep.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if input_fn is None:
raise ValueError('input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the input_fn and collect the input alternatives.
input_ops = input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output signatures
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Locate the latest checkpoint
# TODO(soergel): does it help that we know we have one from this step?
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.tables_initializer()
saver_for_restore = saver.Saver(
variables.global_variables(),
sharded=True)
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
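# Illustrative sketch (not in the original file): exporting a fitted Estimator
# as a SavedModel; `my_estimator`, `serving_input_fn` and the path are
# hypothetical placeholders.
#
#   export_dir = my_estimator.export_savedmodel('/tmp/exported_model',
#                                               serving_input_fn)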
class _FeedFnHook(session_run_hook.SessionRunHook):
"""Runs feed_fn and sets the feed_dict accordingly."""
def __init__(self, feed_fn):
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [_FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
|
apache-2.0
|
yavalvas/yav_com
|
build/matplotlib/doc/mpl_examples/animation/simple_3danim.py
|
12
|
1793
|
"""
A simple example of an animated plot... In 3D!
"""
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
def Gen_RandLine(length, dims=2):
"""
Create a line using a random walk algorithm
length is the number of points for the line.
dims is the number of dimensions the line has.
"""
lineData = np.empty((dims, length))
lineData[:, 0] = np.random.rand(dims)
    for index in range(1, length):
# scaling the random numbers by 0.1 so
# movement is small compared to position.
# subtraction by 0.5 is to change the range to [-0.5, 0.5]
# to allow a line to move backwards.
step = ((np.random.rand(dims) - 0.5) * 0.1)
lineData[:, index] = lineData[:, index-1] + step
return lineData
def update_lines(num, dataLines, lines):
    for line, data in zip(lines, dataLines):
        # NOTE: there is no .set_data() for 3 dim data...
        line.set_data(data[0:2, :num])
        line.set_3d_properties(data[2, :num])
return lines
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
# Fifty lines of random 3-D lines
data = [Gen_RandLine(25, 3) for index in range(50)]
# Creating fifty line objects.
# NOTE: Can't pass empty arrays into 3d version of plot()
lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]
# Setting the axes properties
ax.set_xlim3d([0.0, 1.0])
ax.set_xlabel('X')
ax.set_ylim3d([0.0, 1.0])
ax.set_ylabel('Y')
ax.set_zlim3d([0.0, 1.0])
ax.set_zlabel('Z')
ax.set_title('3D Test')
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, update_lines, 25, fargs=(data, lines),
interval=50, blit=False)
plt.show()
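# Optional (not part of the original example): persist the animation to disk.
# Requires a movie writer such as ffmpeg to be installed on the system.
# line_ani.save('lines.mp4', writer='ffmpeg', fps=20)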
|
mit
|
MarcSpitz/ldebroux_kjadin_masters-thesis_2014
|
src/multicasttree.py
|
1
|
50504
|
# -*- coding: utf-8 -*-
# @author: Debroux Léonard <[email protected]>
# @author: Kevin Jadin <[email protected]>
import sys
import networkx as nx
import matplotlib.pyplot as plt
import networkx.algorithms.dag as dag
from operator import itemgetter
from collections import OrderedDict
import random
import logging as log
import nx_pylab
from Queue import PriorityQueue
from utils import Utils
from setup import Setup
import copy
import math
class MulticastTree(nx.DiGraph):
""" MulticastTree class """
def __init__(self, NetworkGraph, root):
super(MulticastTree, self).__init__()
self.NetworkGraph = NetworkGraph
self.C = set() # empty client set
self.root = root # root of the tree
    self.improvements = 0  # number of improvements made (additions and removals)
self.weight = 0 # weight of the tree (to be updated after every tree modification)
self.C.add(root)
self.add_node(root) # add the root
self.ttl = Setup.get('tabu_ttl')
self.tabuList = {}
self.usePathQueue = False
self.pathQueue = PriorityQueue()
self.childrenPaths = {}
self.parentPaths = {}
# linking the right method according to arguments (codegen)
self.export_step_codegen(Setup.get('steps'))
self.selectEdge = self.selectEdge_choose(Setup.get('selection_heuristic'))
def log(self, lvl=log.INFO):
log.log(lvl, ">>> tree information")
log.log(lvl, "\tweight: %s" % self.weight)
log.log(lvl, "\troot: %s" % self.root)
log.log(lvl, "\ttree clients: %s" % self.clients())
def multicastTreeCopy(self):
""" returns a complete copy of the tree """
MCTcopy = MulticastTree(self.NetworkGraph, self.root)
MCTcopy.graph = copy.deepcopy(self.graph)
MCTcopy.node = copy.deepcopy(self.node)
MCTcopy.adj = copy.deepcopy(self.adj)
MCTcopy.pred = copy.deepcopy(self.pred)
MCTcopy.succ = MCTcopy.adj
MCTcopy.edge = MCTcopy.adj
MCTcopy.C = copy.deepcopy(self.C)
MCTcopy.improvements = self.improvements
MCTcopy.weight = self.weight
MCTcopy.tabuList = copy.deepcopy(self.tabuList)
MCTcopy.usePathQueue = self.usePathQueue
MCTcopy.pathQueue = PriorityQueue()
MCTcopy.pathQueue.queue = copy.deepcopy(self.pathQueue.queue)
MCTcopy.childrenPaths = copy.deepcopy(self.childrenPaths)
MCTcopy.parentPaths = copy.deepcopy(self.parentPaths)
return MCTcopy
def export_step_codegen(self, method):
""" codegen the right exporting method for self.export_step() """
fname = "export_step"
if(method == Setup.PLOT):
def inner(self, outfile):
self.export_plot()
elif(method == Setup.FILE):
def inner(self, outfile):
self.export_file(outfile)
else:
def inner(self, outfile):
pass
inner.__doc__ = "docstring for "+fname
inner.__name__ = fname
setattr(self.__class__, inner.__name__, inner)
def export_file(self, outfile):
"""
@param outfile : string for filename with supported extension {pdf, png}
"""
import pylab
pylab.figure(figsize=(50,50))
self.draw()
pylab.savefig(outfile)
def export_plot(self):
# new window
plt.figure()
self.draw()
plt.show(block=False)
# clean plot
plt.clf()
def draw(self):
""" draw the tree on top of the graph """
# draw the graph except the current tree
graphOnlyEdges = list(set(self.NetworkGraph.edges()) - set(self.edges()))
graphOnlyNodes = list(set(self.NetworkGraph.nodes()) - set(self.nodes()))
ax = plt.axes()
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
nx.draw_networkx(self.NetworkGraph, self.NetworkGraph.layout, ax=ax, edgelist=graphOnlyEdges, nodelist=graphOnlyNodes, font_color='white', node_color='grey', node_shape='s') #node_shape='so^>v<dph8'
# draw the tree
nodeSize = 500
# draw steiner nodes
nx.draw_networkx_nodes(self, self.NetworkGraph.layout, node_color='black', node_size=nodeSize)
# draw the root
nx.draw_networkx_nodes(self, self.NetworkGraph.layout, nodelist=[self.root], node_color='purple', node_size=nodeSize)
# draw the clients
clientsWithoutRoot = set(self.C) - set([self.root])
nx.draw_networkx_nodes(self, self.NetworkGraph.layout, nodelist=clientsWithoutRoot, node_color='blue', node_size=nodeSize)
# draw the edges
edgeLabels=dict([((u,v,),d['weight']) for u,v,d in self.NetworkGraph.edges(data=True)])
nx_pylab.draw_networkx_edges(self, self.NetworkGraph.layout, width=2.0, arrow=True, edge_color='red')
nx.draw_networkx_edge_labels(self, self.NetworkGraph.layout, edge_labels=edgeLabels, label_pos=0.5, font_color='grey')
def export(self, outfile):
nx.draw_graphviz(self)
nx.write_dot(self, outfile)
def selectEdge_choose(self, heuristic):
""" returns the right selectEdge heuristic according to given argument """
if(heuristic == Setup.MOST_EXPENSIVE):
return self.selectEdge_mostExpensive
elif(heuristic == Setup.MOST_EXPENSIVE_PATH):
if(Setup.get('improve_maxtime') > 0):
self.usePathQueue = True
return self.selectEdge_mostExpensivePath
elif(heuristic == Setup.AVERAGED_MOST_EXPENSIVE_PATH):
if(Setup.get('improve_maxtime') > 0):
self.usePathQueue = True
return self.selectEdge_averagedMostExpensivePath
else: # use random selection heuristic
return self.selectEdge_random
def add_edges(self, path):
"""
add edges with attributes fetched from the NetworkGraph
@param: path: a path is a list of nodes [n1, n2, n3, n4, ..]
@raise: Exception if the edge is non-existent in the NetworkGraph
"""
NG = self.NetworkGraph
GraphEdges = NG.edges()
log.debug('GraphEdges: %s' % GraphEdges)
for i in range(len(path) - 1):
n1 = path[i]
n2 = path[i+1]
edgeAttributes = NG[n1][n2]
# build and add the edge to the tree edges set
edgeUnique = (n1, n2) if n1<n2 else (n2, n1)
log.debug('have to add edge: (%s,%s)' % (n1, n2))
if not edgeUnique in GraphEdges:
raise Exception("tree is corrupted")
self.add_edge(n1, n2, edgeAttributes)
self.weight += self[n1][n2]['weight']
def removeEdge(self):
""" removes an edge from the tree
Uses selectEdge() """
# select an edge to remove
edge = self.selectEdge()
if edge:
log.debug('selected edge: %s', edge)
self.weight -= edge[2]['weight']
self.remove_edge(edge[0], edge[1])
return edge
else:
return None
def clients(self):
""" returns the clients set """
return self.C
def predecessor(self, node):
""" In a tree, each node has at most one predecessor
Redefine networkx predecessors method to reflect this fact
@returns parent node or None if given node was the root """
pred = self.predecessors(node)
if pred:
return pred[0]
else:
return None
# █████╗ ██████╗ ██████╗ ██╗████████╗██╗ ██████╗ ███╗ ██╗
# ██╔══██╗██╔══██╗██╔══██╗██║╚══██╔══╝██║██╔═══██╗████╗ ██║
# ███████║██║ ██║██║ ██║██║ ██║ ██║██║ ██║██╔██╗ ██║
# ██╔══██║██║ ██║██║ ██║██║ ██║ ██║██║ ██║██║╚██╗██║
# ██║ ██║██████╔╝██████╔╝██║ ██║ ██║╚██████╔╝██║ ╚████║
# ╚═╝ ╚═╝╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
def addClient(self, c):
""" Subscribe a client to the multicast group
Adds the client to the tree and adds the needed edges
"""
log.debug('adding client %s' % c)
if not c in self.nodes():
pim_mode = Setup.get('pim_mode')
log.debug('PIM mode: %s' % pim_mode)
if pim_mode:
cleanedClosestPath = self.shortestPathToSource(c)
else:
cleanedClosestPath = self.shortestPathToTree(c)
log.debug('cleanedClosestPath: %s' % cleanedClosestPath)
if self.usePathQueue:
self.addToPathQueue(cleanedClosestPath)
self.add_edges(cleanedClosestPath)
else:
log.debug('client %s already in clients set' % c)
# add the client to the clients set
self.C.add(c)
def shortestPathToSource(self, client):
""" Use when simulated the behaviour of PIM-SSM
"""
NG = self.NetworkGraph
# take the shortest path from the root to the client as connection path
closestPath = NG.ShortestPaths[self.root][client][0]
# the path must be cleaned because of edges that might have a weight of 0.
# Consider the following example :
# T: n1 -0-> n2, the path n2-n1-n3 has the same weight as n1-n3.
# We could end up choosing the first path and thus create a loop
cleanedClosestPath = self.cleanPath(closestPath, self.nodes(), [client])
return cleanedClosestPath
def shortestPathToTree(self, client):
""" Returns the shortest path from the client to add to the tree
"""
NG = self.NetworkGraph
ShortestPathsLength = NG.ShortestPathsLength[client]
log.debug('distances to nodes: "%s"' % ShortestPathsLength)
PathsLengthToTree = {k: v[0] for k, v in ShortestPathsLength.items() if k in self.nodes()}
log.debug('PathsLengthToTree: %s' % PathsLengthToTree)
SortedPathsLengthToTree = OrderedDict(sorted(PathsLengthToTree.items(), key=itemgetter(1)))
log.debug('SortedPathsLengthToTree: %s' % SortedPathsLengthToTree)
closestParent, parentLength = SortedPathsLengthToTree.popitem(last=False)
log.debug('closestParent: %s' % closestParent)
closestPath = NG.ShortestPaths[closestParent][client][0]
# the path must be cleaned because of edges that might have a weight of 0.
# Consider the following example :
# T: n1 -0-> n2, the path n2-n1-n3 has the same weight as n1-n3.
# We could end up choosing the first path and thus create a loop
cleanedClosestPath = self.cleanPath(closestPath, self.nodes(), [client])
return cleanedClosestPath
# ██████╗ ███████╗███╗ ███╗ ██████╗ ██╗ ██╗ █████╗ ██╗
# ██╔══██╗██╔════╝████╗ ████║██╔═══██╗██║ ██║██╔══██╗██║
# ██████╔╝█████╗ ██╔████╔██║██║ ██║██║ ██║███████║██║
# ██╔══██╗██╔══╝ ██║╚██╔╝██║██║ ██║╚██╗ ██╔╝██╔══██║██║
# ██║ ██║███████╗██║ ╚═╝ ██║╚██████╔╝ ╚████╔╝ ██║ ██║███████╗
# ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═══╝ ╚═╝ ╚═╝╚══════╝
def removeClient(self, c):
""" removes given client c from the clients set of self
"""
if c == self.root:
log.error('root cannot be removed from the client set')
elif c in self.C:
deg = self.degree(c)
self.C.remove(c)
if deg == 1:
# Upon a removal, the tree is only modified when the degree of the node is one
(node, removedEdges) = self.ascendingClean(c, list())
# here, the path is removed already
self.removeWeightFor(removedEdges)
if self.usePathQueue:
# The modifications of the tree cause the pathQueue to change
pathTuple = self.parentPaths[c]
if node == self.root:
# the clean goes up to the root, it means that pathTuple goes from root to c, just remove it
if pathTuple[1][0] != self.root: # root should be the first node of pathTuple
print ''
print 'paths', self.pathQueue.queue
print 'edges', self.edges()
print 'node', node
print 'client removed', c
raise Exception("Upon removal, cleaning was made up to root and the path is bad")
self.removeTupleFromPathQueue(pathTuple, tryMerge=False)
elif self.degree(node) == 1:
# node is a client
if node in self.parentPaths:
self.removeTupleFromPathQueue(pathTuple)
else:
# node has no parent path (means that a path should be split)
self.splitPathAroundNode(node, pathTuple, removeBotPath=True)
elif self.degree(node) == 2:
if node in self.parentPaths:
# removed path and try merge on node
self.removeTupleFromPathQueue(pathTuple, tryMerge=True)
else:
# node was previously of degree 3, and is in the middle of path.
# it thus has only one child path
if len(self.childrenPaths[node]) != 1:
raise Exception("bug: childrenPath[node] should have a length of one")
childPathTuple = self.childrenPaths[node][0]
if childPathTuple == pathTuple:
# remove path, no need to try to merge
self.removeTupleFromPathQueue(pathTuple, tryMerge=False)
else:
self.splitPathAroundNode(node, pathTuple, removeBotPath=True)
self.mergePaths(node)
# split path on node, rm botPath and try merge on node
else: # degree(node) >= 3
if not pathTuple[1][0] == node:
# if node is within pathTuple, pathTuple must be split
self.splitPathAroundNode(node, pathTuple, removeBotPath=True)
else:
self.removeTupleFromPathQueue(pathTuple, tryMerge=False)
log.debug("removed edges upon removal of %s: %s" % (c, removedEdges))
elif deg == 2:
log.debug("client %s of deg == 2 to remove", c)
self.mergePaths(c)
else: # deg >= 3
log.debug("client %s of deg >=3 to remove", c)
else:
log.error("%s is not in the clients set", c)
# ██╗███╗ ███╗██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗
# ██║████╗ ████║██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝
# ██║██╔████╔██║██████╔╝██████╔╝██║ ██║██║ ██║█████╗
# ██║██║╚██╔╝██║██╔═══╝ ██╔══██╗██║ ██║╚██╗ ██╔╝██╔══╝
# ██║██║ ╚═╝ ██║██║ ██║ ██║╚██████╔╝ ╚████╔╝ ███████╗
# ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═══╝ ╚══════╝
def improveTreeOnce(self, nb, temperature):
""" performs one round of improvement on the tree
# procedure: 3 steps for each round of improvement
# 1) select and remove one edge
# 2) clean the tree by launching cleanTree on the two nodes linked by the removed edge
# 3) add a new path -> O(n^2) search for the shortest path to link the two components
"""
folder = "images/"
if self.improvements < 10:
temp = "00" + str(self.improvements)
elif self.improvements < 100:
temp = "0" + str(self.improvements)
else:
temp = str(self.improvements)
self.export_step(folder+"%s_step0_before_improve.png" % (temp))
# remove an edge from the graph
removed = self.removeEdge()
if removed:
parent, child, edgeAttributes = removed
self.export_step(folder+"%s_step1_(%s-%s)_edge_removed.png" % (temp, parent, child))
# from this point, the DiGraph is made of at least two connected components
subRoot, removedEdges = self.cleanTree(parent, child)
self.export_step(folder+"%s_step2_(%s-%s)_cleaned_tree.png" % (temp, parent, child))
newPathInstalled, degrading = self.reconnectCC(subRoot, removedEdges, temperature)
self.export_step(folder+"%s_step3_(%s-%s)_reconnected_components.png" % (temp, parent, child))
self.improvements += 1
return (newPathInstalled, degrading)
else:
log.debug('no edge found to remove')
return (False, False) # no new path has been installed
if not self.number_of_nodes() == self.number_of_edges()+1:
print 'ERROR nodes:', self.number_of_nodes(), 'edges:', self.number_of_edges(), ': should not be reached'
print 'edges', self.edges()
print 'paths', self.pathQueue.queue
            raise Exception('the multicast tree does not represent a tree after an improveOnce call')
# ███████╗██████╗ ██████╗ ███████╗ ███████╗███████╗██╗ ███████╗ ██████╗████████╗██╗ ██████╗ ███╗ ██╗
# ██╔════╝██╔══██╗██╔════╝ ██╔════╝ ██╔════╝██╔════╝██║ ██╔════╝██╔════╝╚══██╔══╝██║██╔═══██╗████╗ ██║
# █████╗ ██║ ██║██║ ███╗█████╗ ███████╗█████╗ ██║ █████╗ ██║ ██║ ██║██║ ██║██╔██╗ ██║
# ██╔══╝ ██║ ██║██║ ██║██╔══╝ ╚════██║██╔══╝ ██║ ██╔══╝ ██║ ██║ ██║██║ ██║██║╚██╗██║
# ███████╗██████╔╝╚██████╔╝███████╗ ███████║███████╗███████╗███████╗╚██████╗ ██║ ██║╚██████╔╝██║ ╚████║
# ╚══════╝╚═════╝ ╚═════╝ ╚══════╝ ╚══════╝╚══════╝╚══════╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
def selectEdge_random(self):
""" randomly selects and returns an edge to remove from the tree """
edges = self.edges(data=True)
found = False
selectedEdge = None
while not found and edges:
selectedEdge = random.choice(edges)
n1, n2, attr = selectedEdge
if (n1, n2) in self.tabuList:
edges.remove(selectedEdge)
else:
found = True
return selectedEdge
def selectEdge_mostExpensive(self):
""" selects and returns the most expensive edge in the tree """
edges = self.edges(data=True)
selectedEdge = None
weight = -1;
        # equalWeights keeps the selection fair when several edges tie for the maximum
        # weight: each new tie is accepted with probability 1/equalWeights, which
        # yields a uniform choice among the tied edges
equalWeights = 2.0
for e in edges:
n1, n2, attr = e
if not (n1, n2) in self.tabuList:
if attr['weight'] > weight:
selectedEdge = e
weight = attr['weight']
equalWeights = 2.0
elif attr['weight'] == weight:
if random.random() < 1/equalWeights:
selectedEdge = e
equalWeights += 1
return selectedEdge
def selectEdge_mostExpensivePath(self):
""" selects and returns the most expensive edge in the tree """
if self.usePathQueue:
mostExpPath = self.popFirstValidPath(Setup.get('max_paths'))
if mostExpPath:
n1 = mostExpPath[0]
n2 = mostExpPath[1]
return (n1, n2, self.NetworkGraph[n1][n2])
else:
return None
def selectEdge_averagedMostExpensivePath(self):
if self.usePathQueue:
mostExpPath = self.popFirstValidPath(Setup.get('max_paths'))
if mostExpPath:
n1 = mostExpPath[0]
n2 = mostExpPath[1]
return (n1, n2, self.NetworkGraph[n1][n2])
else:
return None
def popFirstValidPath(self, maxPaths = 3):
""" pops the first valid path found in the pathQueue (which can contain invalid/non-split paths).
pops them in order and returns the first valid path.
"""
returnPathTuple = None
toRestore = []
validPaths = []
valid = False
while self.pathQueue.queue and len(validPaths) < maxPaths:
valid = True
pathTuple = self.pathQueue.queue[0]
# check if given path is valid : no coloured node or node with degree > 2
_, path = pathTuple
for n in path[1:-1]:
if (n in self.C) or (self.degree(n) > 2):
valid = False
self.splitPathAroundNode(n, pathTuple)
break # breaks the for loop
if valid:
# check if one of the edges of the path is in the tabu
for i in range(len(path) - 1):
n1 = path[i]
n2 = path[i+1]
if ((n1, n2) in self.tabuList) or ((n2, n1) in self.tabuList):
valid = False
poppedPathTuple = self.pathQueue.get() # when a path is in the tabu, pop it
if not poppedPathTuple == pathTuple:
raise Exception("PathQueue is corrupted")
toRestore.append(poppedPathTuple)
break
if valid:
# add the valid path to the list of valid paths (one will be selected later on)
poppedPathTuple = self.pathQueue.get()
# the current considered path will be added to validPaths
# it must be removed from the priority queue so that another may be selected
if not poppedPathTuple == pathTuple:
raise Exception("PathQueue is corrupted")
validPaths.append(poppedPathTuple)
for p in toRestore:
# restore all the paths that were removed because in the tabu
self.pathQueue.put(p)
if validPaths:
chosenPathTuple = random.choice(validPaths)
for p in validPaths:
# restore all the paths that were removed because chosen in validPaths
self.pathQueue.put(p)
self.removeTupleFromPathQueue(chosenPathTuple)
return chosenPathTuple[1]
else:
return None
# ██████╗██╗ ███████╗ █████╗ ███╗ ██╗██╗███╗ ██╗ ██████╗
# ██╔════╝██║ ██╔════╝██╔══██╗████╗ ██║██║████╗ ██║██╔════╝
# ██║ ██║ █████╗ ███████║██╔██╗ ██║██║██╔██╗ ██║██║ ███╗
# ██║ ██║ ██╔══╝ ██╔══██║██║╚██╗██║██║██║╚██╗██║██║ ██║
# ╚██████╗███████╗███████╗██║ ██║██║ ╚████║██║██║ ╚████║╚██████╔╝
# ╚═════╝╚══════╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚═══╝ ╚═════╝
def cleanTree(self, parent, child):
""" cleans the tree, by launching
ascending clean from given parent node
descending clean from given child node
@returns: one node from the child connected component (or None)
as well as the list of edges that have been removed
"""
asc = self.ascendingClean(parent, list())
desc = self.descendingClean(child, list())
self.removeWeightFor(asc[1])
self.removeWeightFor(desc[1])
removedEdge = (parent, child)
removedEdges = asc[1]
removedEdges.reverse()
removedEdges.append(removedEdge)
removedEdges = removedEdges + desc[1]
return (desc[0], removedEdges)
def removeWeightFor(self, path):
""" decrements self's weight by the cumulative weight of the given path
"""
for e in path:
self.weight -= self.NetworkGraph[e[0]][e[1]]['weight']
def ascendingClean(self, current, removedEdges):
""" launches an ascendingClean procedure:
@returns: one node from the tree (the first undeleted node, or None)
"""
log.debug('clients: %s' % self.C)
log.debug('current: %s' % current)
if (current in self.C) or (self.degree(current) >= 2):
log.debug('current kept: %s' % current)
return (current, removedEdges)
else:
log.debug('predecessors: %s' % self.predecessors(current))
parent = self.predecessors(current)[0] #only one element in the list if any
self.remove_node(current)
removedEdge = (parent, current)
removedEdges.append(removedEdge)
log.debug('current removed: %s and parent is: %s' % (current, parent))
return self.ascendingClean(parent, removedEdges)
def descendingClean(self, current, removedEdges):
""" launches an descendingClean procedure:
@returns: one node from the tree (the first undeleted node, or None)
"""
if (current in self.C) or (self.degree(current) >= 2):
return (current, removedEdges)
else:
child = self.successors(current)[0]
self.remove_node(current)
removedEdge = (current, child)
removedEdges.append(removedEdge)
return self.descendingClean(child, removedEdges)
def cleanPath(self, path, sT, dT):
""" cleans a path from the edges it contains that already are in the tree (in one direction or the other).
Needed to avoid creating loops in the tree upon reconnection
@param: path: the path to clean
"""
cleanedPath = []
firstInST = 0
for i in reversed(range(len(path))):
if path[i] in sT:
cleanedPath.append(path[i])
firstInST = i
break
for i in range(firstInST+1, len(path)):
if not path[i] in dT:
cleanedPath.append(path[i])
else:
cleanedPath.append(path[i])
break
return cleanedPath
# ██████╗ ███████╗ ██████╗ ██████╗ ███╗ ██╗███╗ ██╗███████╗ ██████╗████████╗██╗ ██████╗ ███╗ ██╗
# ██╔══██╗██╔════╝██╔════╝██╔═══██╗████╗ ██║████╗ ██║██╔════╝██╔════╝╚══██╔══╝██║██╔═══██╗████╗ ██║
# ██████╔╝█████╗ ██║ ██║ ██║██╔██╗ ██║██╔██╗ ██║█████╗ ██║ ██║ ██║██║ ██║██╔██╗ ██║
# ██╔══██╗██╔══╝ ██║ ██║ ██║██║╚██╗██║██║╚██╗██║██╔══╝ ██║ ██║ ██║██║ ██║██║╚██╗██║
# ██║ ██║███████╗╚██████╗╚██████╔╝██║ ╚████║██║ ╚████║███████╗╚██████╗ ██║ ██║╚██████╔╝██║ ╚████║
# ╚═╝ ╚═╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
def reconnectCC(self, subRoot, removedEdges, temperature, onlyBest=False):
""" aims at reconnecting the two connected components after an edge removal
potentially inverts edge directions
        @returns: (newPathInstalled, degrading); newPathInstalled is False when no new path was found and the previously removed path is restored
"""
# select a path (at least one edge) to add (may be the same one)
newPathInstalled = True
log.debug('subRoot: %s' % subRoot)
# desc = set of nodes from the subtree
desc = dag.descendants(self, subRoot).union(set([subRoot]))
# sourceTree = set of nodes from the source tree
sourceTree = set(self.nodes()) - desc
bestPath, degrading = self.selectReconnectionPath(sourceTree, desc, removedEdges, temperature)
if not bestPath:
log.debug("no improving path could be found for reconnecting the two components, restoring the previously removed edges..")
bestPath = self.edgePathToNodePath(removedEdges)
newPathInstalled = False
log.debug('descendants : %s' % desc)
log.debug('sourceTree : %s' % sourceTree)
log.debug('bestPath : %s' % bestPath)
log.debug('removedEdges: %s' % self.edgePathToNodePath(removedEdges))
if not bestPath[-1] == subRoot:
# attempt to reroot
self.reRoot(bestPath[-1], subRoot)
if self.usePathQueue:
self.addToPathQueue(bestPath)
self.addPathToTabu(bestPath)
self.add_edges(bestPath)
return (newPathInstalled, degrading)
def selectReconnectionPath(self, sourceTreeNodes, descTreeNodes, removedEdges, temperature):
""" selects reconnection path between the two components
considers pairs of nodes from the two given sets source/descTreeNodes
applies the search_strategy parameter
        allows degrading moves, with probability derived from the given temperature, only if the intensify_only parameter is set to False """
removedPath = self.edgePathToNodePath(removedEdges)
toImprove = self.NetworkGraph.getEdgePathWeight(removedEdges)
log.debug("cost to improve : toImprove = %s" % toImprove)
intensify = Setup.get('intensify_only')
nbNodesInST = min([Setup.get('improve_search_space'), len(sourceTreeNodes)])
sourceTreeNodesList = list(sourceTreeNodes)
descTreeNodesList = list(descTreeNodes)
sourceTreeNodesList = random.sample(sourceTreeNodesList, nbNodesInST)
degrading = False # we intensify if possible
improvingPath = None
improvingPathCost = sys.maxint
lessDegradingPath = None
lessDegradingPathCost = sys.maxint
search_strategy = Setup.get('search_strategy')
for stn in sourceTreeNodesList:
for dtn in descTreeNodesList:
log.debug("considered reconnection : (%s, %s)" % (stn, dtn))
sPW = self.NetworkGraph.ShortestPathsLength[stn][dtn][0]
if sPW < toImprove and sPW < improvingPathCost:
improvingPath = (stn, dtn)
improvingPathCost = sPW
# sPW >= toImprove
# if intensify = True, no need to check for a lessDegradingPath
elif (not intensify) and (sPW < lessDegradingPathCost):
sP = self.NetworkGraph.ShortestPaths[stn][dtn][0]
if (sP != removedPath):
lessDegradingPath = sP
lessDegradingPathCost = sPW
if (search_strategy == Setup.FIRST_IMPROVEMENT) and improvingPath:
break
if improvingPath:
(stn, dtn) = improvingPath
sP = self.NetworkGraph.ShortestPaths[stn][dtn][0]
cleanedImpPath = self.cleanPath(sP, sourceTreeNodes, descTreeNodes)
return (cleanedImpPath, degrading)
elif not intensify and lessDegradingPath:
cleanedPath = self.cleanPath(lessDegradingPath, sourceTreeNodes, descTreeNodes)
if cleanedPath != removedPath:
cPWeight = self.NetworkGraph.getNodePathWeight(cleanedPath)
if cPWeight < toImprove:
degrading = False # because the path is improving, although useless, for readability
return (cleanedPath, degrading)
else: # cPWeight >= toImprove
degrading = self.evaluateSAProbability(toImprove, cPWeight, temperature)
if degrading:
return (cleanedPath, degrading)
return (None, degrading)
def nodePathToEdgePath(self, nodePath):
""" converts a path expressed as [n1, n2, n3] to the tuple representation [(n1, n2), (n2, n3), (n3, n3)]
"""
returnedList = []
for i in range(len(nodePath) - 1):
n1 = nodePath[i]
n2 = nodePath[i+1]
returnedList.append((n1, n2,))
return returnedList
def edgePathToNodePath(self, edgePath):
""" converts a path expressed as tuple representation [(n1, n2), (n2, n3), (n3, n3)]
to a list of nodes representation: [n1, n2, n3]
"""
n1, n2 = edgePath[0]
nodePath = [n1, n2]
for e in edgePath[1:]:
n1, n2 = e
if not n1 == nodePath[-1]:
raise Exception("EdgePath is not correct")
nodePath.append(n2)
return nodePath
# ████████╗ █████╗ ██████╗ ██╗ ██╗
# ╚══██╔══╝██╔══██╗██╔══██╗██║ ██║
# ██║ ███████║██████╔╝██║ ██║
# ██║ ██╔══██║██╔══██╗██║ ██║
# ██║ ██║ ██║██████╔╝╚██████╔╝
# ╚═╝ ╚═╝ ╚═╝╚═════╝ ╚═════╝
def addPathToTabu(self, path):
""" adds given path to the tabu list with initial ttl value
"""
for i in range(len(path) - 1):
n1 = path[i]
n2 = path[i+1]
e = (n1, n2)
self.tabuList[e] = self.ttl+1
def updateTabu(self):
""" updates the tabu list: decrements all values by 1 and remove keys when such values reach 0
"""
for e in self.tabuList.copy():
if(self.tabuList[e] == 1):
del self.tabuList[e]
else:
self.tabuList[e] = self.tabuList[e] - 1
def emptyTabu(self):
""" empty the tabu list
"""
self.tabuList = {}
# ██████╗ ███████╗██████╗ ██████╗ ██████╗ ████████╗
# ██╔══██╗██╔════╝██╔══██╗██╔═══██╗██╔═══██╗╚══██╔══╝
# ██████╔╝█████╗ ██████╔╝██║ ██║██║ ██║ ██║
# ██╔══██╗██╔══╝ ██╔══██╗██║ ██║██║ ██║ ██║
# ██║ ██║███████╗██║ ██║╚██████╔╝╚██████╔╝ ██║
# ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝
def reRoot(self, newRoot, oldRoot):
""" launches a reroot procedure from given oldRoot to given newRoot
"""
# when rerooting, some paths may be inverted, and thus, must change in the path priority queue
# if oldRoot is a black node and is now of degree 2, two paths must be merged into one
# Has to be done first, before inverting the edges in the tree.
if self.usePathQueue:
newRootInsideAPath = not newRoot in self.parentPaths
if newRootInsideAPath:
# need to do a split before inverting the paths up to the oldRoot
self.splitPathContainingNewRoot(newRoot, oldRoot)
# invert paths from newRoot to oldRoot
if not newRoot in self.parentPaths:
raise Exception("reRoot failed")
self.invertPathsFromNewRootToOldRoot(newRoot, oldRoot)
# attempt to connect paths together at oldRoot
self.mergePaths(oldRoot)
n1 = newRoot
parents = self.predecessors(n1)
while parents:
# parents of tree nodes can have at most 1 element
n2 = parents[0]
parents = self.predecessors(n2)
e = self[n2][n1]
self.remove_edge(n2, n1)
self.add_edge(n1, n2, e)
n1 = n2
# ██████╗ █████╗ ████████╗██╗ ██╗ ██████╗ ██╗ ██╗███████╗██╗ ██╗███████╗
# ██╔══██╗██╔══██╗╚══██╔══╝██║ ██║██╔═══██╗██║ ██║██╔════╝██║ ██║██╔════╝
# ██████╔╝███████║ ██║ ███████║██║ ██║██║ ██║█████╗ ██║ ██║█████╗
# ██╔═══╝ ██╔══██║ ██║ ██╔══██║██║▄▄ ██║██║ ██║██╔══╝ ██║ ██║██╔══╝
# ██║ ██║ ██║ ██║ ██║ ██║╚██████╔╝╚██████╔╝███████╗╚██████╔╝███████╗
# ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══▀▀═╝ ╚═════╝ ╚══════╝ ╚═════╝ ╚══════╝
def invertPathsFromNewRootToOldRoot(self, newRoot, oldRoot):
""" rerooting case when a rerooting needs to be done from oldRoot to newRoot.
Invert all the paths from oldRoot to newRoot
"""
currentRoot = newRoot
toInvert = []
while currentRoot != oldRoot:
# parentPath is the path to invert
if not currentRoot in self.parentPaths:
# it means that currentRoot has a childPath that leads to the previous currentRoot,
# but is not in a path, that path has to be split
self.splitPathContainingNewRoot(currentRoot, oldRoot)
parentPathTuple = self.parentPaths[currentRoot]
toInvert.append(parentPathTuple)
# update the currentRoot to the last element of the parentPath
currentRoot = parentPathTuple[1][0]
# inversion must be done after climbing the tree
# top-down inversion to avoid messing with the parentPaths data structure
toInvert.reverse()
for t in toInvert:
self.invertPath(t)
def splitPathContainingNewRoot(self, newRoot, oldRoot):
""" transform the case when newRoot is inside a path into the simpler case when
there is a path starting and ending at newRoot
newRoot is the node where the split is to be done
"""
borderNodeFound = False
pathContainingNewRoot = None
n1 = newRoot
while not borderNodeFound:
parents = self.predecessors(n1)
if parents: # n1 has a predecessor (is not a root)
parent = parents[0]
if parent in self.childrenPaths: # the node parent has children paths
for p in self.childrenPaths[parent]:
if newRoot in p[1]: # if new root is in one of the children paths of parent
borderNodeFound = True
pathContainingNewRoot = p
break
n1 = parent
else:
# parents is empty, n1 should be oldRoot
if n1 == oldRoot:
raise Exception("Error in path splitting while rerooting, no path seems to contain newRoot")
else:
raise Exception("Error in path splitting while rerooting, a root different from oldRoot has been reached")
self.splitPathAroundNode(newRoot, pathContainingNewRoot)
def addToPathQueue(self, path):
""" pre: the path should be in the right way, that is, the first node
of the path is the one that is part of the tree component containing the root
The path should not have already been added to the tree
Called whenever a path is added to the tree
"""
pathWeight = self.NetworkGraph.getNodePathWeight(path)
pathTuple = (-pathWeight, path)
self.addTupleToPathQueue(pathTuple)
def addTupleToPathQueue(self, pathTuple):
""" adds a tuple to the path queue
each tuple contains a path and its weight (negated)
"""
n1 = pathTuple[1][0] # first node of the path
n2 = pathTuple[1][-1] # last node of the path
if n1 == n2:
print ''
print 'edges', self.edges()
print 'paths', self.pathQueue.queue
print pathTuple
raise Exception("Bad path, begins and ends with the same node. Shouldn't happen n1 == n2 %s", n1)
self.pathQueue.put(pathTuple)
self.addChildPath(pathTuple, n1)
self.addParentPath(pathTuple, n2)
def addChildPath(self, pathTuple, node):
if not node in self.childrenPaths:
self.childrenPaths[node] = [pathTuple]
else:
self.childrenPaths[node].append(pathTuple)
def addParentPath(self, pathTuple, node):
self.parentPaths[node] = pathTuple
def removeChildPath(self, pathTuple, node):
self.childrenPaths[node].remove(pathTuple)
if not self.childrenPaths[node]: # if the list becomes empty, remove the key
del self.childrenPaths[node]
def removeParentPath(self, pathTuple, node):
if not pathTuple == self.parentPaths[node]:
raise Exception('removeParentPath failed')
del self.parentPaths[node]
def removeTupleFromPathQueue(self, pathTuple, tryMerge = True):
n1 = pathTuple[1][0] # first node of the path
n2 = pathTuple[1][-1] # last node of the path
Utils.removeFromPriorityQueue(self.pathQueue, pathTuple)
self.removeChildPath(pathTuple, n1)
self.removeParentPath(pathTuple, n2)
# try to merge
if tryMerge:
self.mergePaths(n1)
self.mergePaths(n2)
def replacePaths(self, toRemove, toAdd):
for p in toRemove:
self.removeTupleFromPathQueue(p, False)
for p in toAdd:
self.addTupleToPathQueue(p)
def mergePaths(self, node):
""" attempts to merge the paths around the given node """
if (not node in self.C):
if (node in self.parentPaths) and (node in self.childrenPaths):
childrenTuples = self.childrenPaths[node]
parentTuple = self.parentPaths[node]
if (len(childrenTuples) == 1) and (parentTuple):
childTuple = childrenTuples[0] # there is only one childTuple
newPath = parentTuple[1][:]
newPath.extend(childTuple[1][1:])
newWeight = parentTuple[0]+childTuple[0]
newTuple = (newWeight, newPath)
self.replacePaths([childTuple, parentTuple], [newTuple])
def splitPathAroundNode(self, node, pathTuple, removeBotPath=False):
""" splits the path contained in pathTuple in two paths around node
The three data structures pathQueue, parentPaths and childrenPaths are updated """
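        # Example (sketch): splitting (w, [a, b, c, d]) around c produces a top tuple
        # for [a, b, c] and a bottom tuple for [c, d]; with removeBotPath=True only
        # the top tuple is kept.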
weight, path = pathTuple
if not node in path:
raise Exception('A path cannot be split around a node if the node is not in the path')
nodeIndex = path.index(node)
topPath = path[:(nodeIndex+1)]
botPath = path[nodeIndex:]
topWeight = -self.NetworkGraph.getNodePathWeight(topPath)
botWeight = weight - topWeight
topTuple = (topWeight, topPath)
botTuple = (botWeight, botPath)
if removeBotPath:
self.replacePaths([pathTuple], [topTuple])
else:
self.replacePaths([pathTuple], [topTuple, botTuple])
def invertPath(self, pathTuple):
""" for given pathTuple (weight, path), invert all of its edges
(childrenPaths and parentPaths data structures are updated in
subsequent calls to add/removeTupleFrom/ToPathQueue) """
pWeight, path = pathTuple
oldRoot, newRoot = path[0], path[-1]
reversedPath = path[:]
reversedPath.reverse()
newPathTuple = (pWeight, reversedPath)
self.replacePaths([pathTuple], [newPathTuple])
# ███████╗██╗███╗ ███╗██╗ ██╗██╗ █████╗ ████████╗███████╗██████╗
# ██╔════╝██║████╗ ████║██║ ██║██║ ██╔══██╗╚══██╔══╝██╔════╝██╔══██╗
# ███████╗██║██╔████╔██║██║ ██║██║ ███████║ ██║ █████╗ ██║ ██║
# ╚════██║██║██║╚██╔╝██║██║ ██║██║ ██╔══██║ ██║ ██╔══╝ ██║ ██║
# ███████║██║██║ ╚═╝ ██║╚██████╔╝███████╗██║ ██║ ██║ ███████╗██████╔╝
# ╚══════╝╚═╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═════╝
# █████╗ ███╗ ██╗███╗ ██╗███████╗ █████╗ ██╗ ██╗███╗ ██╗ ██████╗
# ██╔══██╗████╗ ██║████╗ ██║██╔════╝██╔══██╗██║ ██║████╗ ██║██╔════╝
# ███████║██╔██╗ ██║██╔██╗ ██║█████╗ ███████║██║ ██║██╔██╗ ██║██║ ███╗
# ██╔══██║██║╚██╗██║██║╚██╗██║██╔══╝ ██╔══██║██║ ██║██║╚██╗██║██║ ██║
# ██║ ██║██║ ╚████║██║ ╚████║███████╗██║ ██║███████╗██║██║ ╚████║╚██████╔╝
# ╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝╚══════╝╚═╝╚═╝ ╚═══╝ ╚═════╝
def evaluateSAProbability(self, oldWeight, newWeight, temperature):
"""
returns True if we want to replace paths according to the temperature and their weights
If newWeight is higher than oldWeight (this corresponds to a degradation),
return True with probability exp( -(newWeight-oldWeight) / temperature)
else return False
If newWeight is lower than oldWeight (this corresponds to an improvement),
return True
"""
delta = 100*(newWeight - oldWeight)/(float(newWeight))
if delta > 0:
# degrading
temperature = float(temperature) # ensure we don't divide by an integer
if temperature == 0.0:
# do not degrade when temperature is zero
return False
val = math.exp(-delta/temperature)
r = random.random()
if r < val:
# print when a degradation is accepted
# from __future__ import print_function
# sys.stdout.write('|')
# print '|',
return True
else:
return False
else:
# improving
return True
# ██╗ ██╗ █████╗ ██╗ ██╗██████╗ █████╗ ████████╗██╗ ██████╗ ███╗ ██╗
# ██║ ██║██╔══██╗██║ ██║██╔══██╗██╔══██╗╚══██╔══╝██║██╔═══██╗████╗ ██║
# ██║ ██║███████║██║ ██║██║ ██║███████║ ██║ ██║██║ ██║██╔██╗ ██║
# ╚██╗ ██╔╝██╔══██║██║ ██║██║ ██║██╔══██║ ██║ ██║██║ ██║██║╚██╗██║
# ╚████╔╝ ██║ ██║███████╗██║██████╔╝██║ ██║ ██║ ██║╚██████╔╝██║ ╚████║
# ╚═══╝ ╚═╝ ╚═╝╚══════╝╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
def validate(self):
""" validates that self is a valid multicast service with respect to the inner clients set """
treeNodes = dag.descendants(self, self.root)
treeNodes.add(self.root)
# the tree rooted at self.root is the only component of the multicasttree 'self'
assert set(self.nodes()) == treeNodes
# every client of the multicast group is a node of the multicasttree 'self'
assert self.C.issubset(treeNodes)
# there is no loop in the tree
assert len(self.nodes()) == len(self.edges()) + 1
def validatePIMTree(self):
""" validates that self follows the PIM's shortest path-based way of building multicast trees """
log.debug("ensurePIMTree")
T = self
NG = T.NetworkGraph
clients = T.C
root = T.root
# compute PIM tree edges
shortestPaths = [NG.ShortestPaths[root][c][0] for c in clients]
log.debug("shortestPaths %s" % shortestPaths)
PIMTreeEdgesSet = set()
for nodesPath in shortestPaths:
edgesPath = T.nodePathToEdgePath(nodesPath)
PIMTreeEdgesSet |= set(edgesPath)
log.debug("PIMTreeEdgesSet %s" % PIMTreeEdgesSet)
# this tree edges
treeEdgesSet = set(T.edges())
diff = PIMTreeEdgesSet ^ treeEdgesSet
log.debug("diff %s" % diff)
if diff:
raise Exception("the given tree does not follow the PIM mode for tree building!")
|
gpl-2.0
|
spallavolu/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
phobson/statsmodels
|
statsmodels/base/tests/test_data.py
|
5
|
35116
|
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas
import pandas.util.testing as ptesting
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
#class TestDates(object):
# @classmethod
# def setupClass(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but does not test
        # the wrapped results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
        # HACK: on numpy master (after the NA changes) assert_equal fails on
        # pandas indices, so compare with np.all instead
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setupClass(cls):
super(TestArrays2dEndog, cls).setupClass()
cls.endog = np.random.random((10,1))
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
#cls.endog = endog.squeeze()
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setupClass(cls):
super(TestArrays1dExog, cls).setupClass()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:,None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but does not test
        # the wrapped results themselves
ptesting.assert_series_equal(data.wrap_output(self.col_input,
'columns'),
self.col_result)
ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestLists(TestArrays):
@classmethod
def setupClass(cls):
super(TestLists, cls).setupClass()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10,2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setupClass(cls):
super(TestRecarrays, cls).setupClass()
cls.endog = np.random.random(9).view([('y_1',
'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog, self.exog.view((float, 3), type=np.ndarray))
class TestStructarrays(TestArrays):
@classmethod
def setupClass(cls):
super(TestStructarrays, cls).setupClass()
cls.endog = np.random.random(9).view([('y_1',
'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog, self.exog.view((float,3), type=np.ndarray))
class TestListDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10).tolist()
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x1','x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10)
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x1','x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.Series(np.random.random(10), name='y_1')
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_series_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.Series(np.random.random(10), name='y_1')
exog = pandas.Series(np.random.random(10), name='x_1')
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index = [exog.name])
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index = exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = [exog.name],
columns = [exog.name])
cls.xnames = ['x_1']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_series_equal(self.data.orig_endog, self.endog)
ptesting.assert_series_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:,None])
def test_alignment():
#Fix Issue #206
from statsmodels.regression.linear_model import OLS
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
#growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they won't conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pandas.DataFrame(data)
    # endog and exog indices do not align, so OLS should raise rather than guess which index to use
np.testing.assert_raises(ValueError, OLS, *(endog, exog))
class TestMultipleEqsArrays(TestArrays):
@classmethod
def setupClass(cls):
cls.endog = np.random.random((10,4))
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs,neqs))
cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = ['y1', 'y2', 'y3', 'y4']
cls.row_labels = None
def test_attach(self):
data = self.data
        # this makes sure that what the wrappers need works, but does not test
        # the wrapped results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
np.testing.assert_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMultipleEqsDataFrames(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = endog = pandas.DataFrame(np.random.random((10,4)),
columns=['y_1', 'y_2', 'y_3', 'y_4'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.cov_eq_input = np.random.random((neqs, neqs))
cls.cov_eq_result = pandas.DataFrame(cls.cov_eq_input,
index=endog.columns,
columns=endog.columns)
cls.col_eq_input = np.random.random((nvars, neqs))
cls.col_eq_result = pandas.DataFrame(cls.col_eq_input,
index=exog.columns,
columns=endog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
cls.row_labels = cls.exog.index
def test_attach(self):
data = self.data
ptesting.assert_series_equal(data.wrap_output(self.col_input,
'columns'),
self.col_result)
ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_eq_input,
'cov_eq'),
self.cov_eq_result)
ptesting.assert_frame_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMissingArray(object):
@classmethod
def setupClass(cls):
X = np.random.random((25,4))
y = np.random.random(25)
y[10] = np.nan
X[2,3] = np.nan
X[14,2] = np.nan
cls.y, cls.X = y, X
def test_raise_no_missing(self):
# smoke test for #1700
sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
'raise')
def test_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, self.X, 'raise'))
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y[idx]
X = X[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y)
np.testing.assert_array_equal(data.exog, X)
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y)
np.testing.assert_array_equal(data.exog, self.X)
def test_endog_only_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, None, 'raise'))
def test_endog_only_drop(self):
y = self.y
y = y[~np.isnan(y)]
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_mv_endog(self):
y = self.X
y = y[~np.isnan(y).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_extra_kwargs_2d(self):
sigma = np.random.random((25, 25))
sigma = sigma + sigma.T - np.diag(np.diag(sigma))
data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
sigma = sigma[idx][:,idx]
np.testing.assert_array_equal(data.sigma, sigma)
def test_extra_kwargs_1d(self):
weights = np.random.random(25)
data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)
class TestMissingPandas(object):
@classmethod
def setupClass(cls):
X = np.random.random((25,4))
y = np.random.random(25)
y[10] = np.nan
X[2,3] = np.nan
X[14,2] = np.nan
cls.y, cls.X = pandas.Series(y), pandas.DataFrame(X)
def test_raise_no_missing(self):
# smoke test for #1700
sm_data.handle_data(pandas.Series(np.random.random(20)),
pandas.DataFrame(np.random.random((20, 2))),
'raise')
def test_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, self.X, 'raise'))
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y.ix[idx]
X = X.ix[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
ptesting.assert_series_equal(data.orig_endog, self.y.ix[idx])
np.testing.assert_array_equal(data.exog, X.values)
ptesting.assert_frame_equal(data.orig_exog, self.X.ix[idx])
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y.values)
np.testing.assert_array_equal(data.exog, self.X.values)
def test_endog_only_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, None, 'raise'))
def test_endog_only_drop(self):
y = self.y
y = y.dropna()
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_mv_endog(self):
y = self.X
y = y.ix[~np.isnan(y.values).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_labels(self):
        # rows 2, 10 and 14 contain NaN and are dropped
labels = pandas.Index([0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24])
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_(data.row_labels.equals(labels))
class TestConstant(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load_pandas
cls.data = load_pandas()
def test_array_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
def test_array_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
class TestHandleMissing(object):
def test_pandas(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
def test_arrays(self):
arr = np.random.randn(20, 4)
arr[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = arr[:,0], arr[:,1:]
data, _ = sm_data.handle_missing(y, X, missing='drop')
bools_mask = np.ones(20, dtype=bool)
bools_mask[[2, 5, 10]] = False
y_exp = arr[bools_mask, 0]
X_exp = arr[bools_mask, 1:]
np.testing.assert_array_equal(data['endog'], y_exp)
np.testing.assert_array_equal(data['exog'], X_exp)
def test_pandas_array(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]].values
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]].values
np.testing.assert_array_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
def test_array_pandas(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]].values, df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]].values, df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
np.testing.assert_array_equal(data['endog'], y_exp)
def test_noop(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='none')
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
class CheckHasConstant(object):
def test_hasconst(self):
for x, result in zip(self.exogs, self.results):
mod = self.mod(self.y, x)
assert_equal(mod.k_constant, result[0]) #['k_constant'])
assert_equal(mod.data.k_constant, result[0])
if result[1] is None:
assert_(mod.data.const_idx is None)
else:
assert_equal(mod.data.const_idx, result[1])
# extra check after fit, some models raise on singular
fit_kwds = getattr(self, 'fit_kwds', {})
try:
res = mod.fit(**fit_kwds)
assert_equal(res.model.k_constant, result[0])
assert_equal(res.model.data.k_constant, result[0])
            except Exception:
                pass
@classmethod
def setup_class(cls):
# create data
np.random.seed(0)
cls.y_c = np.random.randn(20)
cls.y_bin = (cls.y_c > 0).astype(int)
x1 = np.column_stack((np.ones(20), np.zeros(20)))
result1 = (1, 0)
x2 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5)).astype(float)
result2 = (1, None)
x3 = np.column_stack((np.arange(20), np.zeros(20)))
result3 = (0, None)
x4 = np.column_stack((np.arange(20), np.zeros((20, 2))))
result4 = (0, None)
x5 = np.column_stack((np.zeros(20), 0.5 * np.ones(20)))
result5 = (1, 1)
x5b = np.column_stack((np.arange(20), np.ones((20, 3))))
result5b = (1, 1)
x5c = np.column_stack((np.arange(20), np.ones((20, 3)) * [0.5, 1, 1]))
result5c = (1, 2)
# implicit and zero column
x6 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
np.zeros(20))).astype(float)
result6 = (1, None)
x7 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
np.zeros((20, 2)))).astype(float)
result7 = (1, None)
cls.exogs = (x1, x2, x3, x4, x5, x5b, x5c, x6, x7)
cls.results = (result1, result2, result3, result4, result5, result5b,
result5c, result6, result7)
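        # Note (editor's addition): each results tuple above is (k_constant, const_idx),
        # i.e. the number of constants detected and the column index of an explicit
        # constant column (None when the constant is implicit or absent).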
class TestHasConstantOLS(CheckHasConstant):
def __init__(self):
        self.setup_class()  # why doesn't nose do it properly?
from statsmodels.regression.linear_model import OLS
self.mod = OLS
self.y = self.y_c
class TestHasConstantGLM(CheckHasConstant):
def __init__(self):
        self.setup_class()  # why doesn't nose do it properly?
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
self.mod = lambda y, x : GLM(y, x, family=families.Binomial())
self.y = self.y_bin
class TestHasConstantLogit(CheckHasConstant):
def __init__(self):
        self.setup_class()  # why doesn't nose do it properly?
from statsmodels.discrete.discrete_model import Logit
self.mod = Logit
self.y = self.y_bin
self.fit_kwds = {'disp': False}
def test_dtype_object():
# see #880
X = np.random.random((40,2))
df = pandas.DataFrame(X)
df[2] = np.random.randint(2, size=40).astype('object')
df['constant'] = 1
y = pandas.Series(np.random.randint(2, size=40))
np.testing.assert_raises(ValueError, sm_data.handle_data, y, df)
def test_formula_missing_extra_arrays():
np.random.seed(1)
# because patsy can't turn off missing data-handling as of 0.3.0, we need
# separate tests to make sure that missing values are handled correctly
# when going through formulas
# there is a handle_formula_data step
# then there is the regular handle_data step
# see 2083
# the untested cases are endog/exog have missing. extra has missing.
# endog/exog are fine. extra has missing.
# endog/exog do or do not have missing and extra has wrong dimension
y = np.random.randn(10)
y_missing = y.copy()
y_missing[[2, 5]] = np.nan
X = np.random.randn(10)
X_missing = X.copy()
X_missing[[1, 3]] = np.nan
weights = np.random.uniform(size=10)
weights_missing = weights.copy()
weights_missing[[6]] = np.nan
weights_wrong_size = np.random.randn(12)
data = {'y': y,
'X': X,
'y_missing': y_missing,
'X_missing': X_missing,
'weights': weights,
'weights_missing': weights_missing}
data = pandas.DataFrame.from_dict(data)
data['constant'] = 1
formula = 'y_missing ~ X_missing'
((endog, exog),
missing_idx, design_info) = handle_formula_data(data, None, formula,
depth=2,
missing='drop')
kwargs = {'missing_idx': missing_idx, 'missing': 'drop',
'weights': data['weights_missing']}
model_data = sm_data.handle_data(endog, exog, **kwargs)
data_nona = data.dropna()
assert_equal(data_nona['y'].values, model_data.endog)
assert_equal(data_nona[['constant', 'X']].values, model_data.exog)
assert_equal(data_nona['weights'].values, model_data.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
weights_2d = np.random.randn(10, 10)
weights_2d[[8, 7], [7, 8]] = np.nan #symmetric missing values
kwargs.update({'weights': weights_2d,
'missing_idx': missing_idx})
model_data2 = sm_data.handle_data(endog, exog, **kwargs)
good_idx = [0, 4, 6, 9]
assert_equal(data.ix[good_idx, 'y'], model_data2.endog)
assert_equal(data.ix[good_idx, ['constant', 'X']], model_data2.exog)
assert_equal(weights_2d[good_idx][:, good_idx], model_data2.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
kwargs.update({'weights': weights_wrong_size,
'missing_idx': missing_idx})
assert_raises(ValueError, sm_data.handle_data, endog, exog, **kwargs)
if __name__ == "__main__":
import nose
#nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# exit=False)
nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)
| bsd-3-clause | mne-tools/mne-tools.github.io | stable/_downloads/d1b7dbf437b3b47476964425eca7f23a/source_label_time_frequency.py | 20 | 3702 |
"""
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear, based on the dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
freqs = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = freqs / 3.  # different number of cycles per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and the inter-trial coherence
power, itc = source_induced_power(
this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
itc = np.mean(itc, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(itc,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('ITC (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause | paulmueller/ODTbrain | examples/backprop_from_mie_3d_sphere.py | 1 | 3811 |
r"""Mie sphere
The *in silico* data set was created with the Mie calculation software
`GMM-field`_. The data consist of a two-dimensional projection of a
sphere with radius :math:`R=14\lambda`,
refractive index :math:`n_\mathrm{sph}=1.006`,
embedded in a medium of refractive index :math:`n_\mathrm{med}=1.0`
onto a detector which is :math:`l_\mathrm{D} = 20\lambda` away from the
center of the sphere.
The package :mod:`nrefocus` must be used to numerically focus
the detected field prior to the 3D backpropagation with ODTbrain.
In :func:`odtbrain.backpropagate_3d`, the parameter `lD` must
be set to zero (:math:`l_\mathrm{D}=0`).
The figure shows the 3D reconstruction from Mie simulations of a
perfect sphere using 200 projections. Missing angle artifacts are
visible along the :math:`y`-axis due to the :math:`2\pi`-only
coverage in 3D Fourier space.
.. _`GMM-field`: https://code.google.com/p/scatterlib/wiki/Nearfield
"""
import matplotlib.pylab as plt
import nrefocus
import numpy as np
import odtbrain as odt
from example_helper import load_data
Ex, cfg = load_data("mie_3d_sphere_field.zip",
f_sino_imag="mie_sphere_imag.txt",
f_sino_real="mie_sphere_real.txt",
f_info="mie_info.txt")
# Manually set number of angles:
A = 200
print("Example: Backpropagation from 3D Mie scattering")
print("Refractive index of medium:", cfg["nm"])
print("Measurement position from object center:", cfg["lD"])
print("Wavelength sampling:", cfg["res"])
print("Number of angles for reconstruction:", A)
print("Performing backpropagation.")
# Reconstruction angles
angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
# Perform focusing
Ex = nrefocus.refocus(Ex,
d=-cfg["lD"]*cfg["res"],
nm=cfg["nm"],
res=cfg["res"],
)
# Create sinogram
u_sin = np.tile(Ex.flat, A).reshape(A, int(cfg["size"]), int(cfg["size"]))
# Apply the Rytov approximation
u_sinR = odt.sinogram_as_rytov(u_sin)
# Backpropagation
fR = odt.backpropagate_3d(uSin=u_sinR,
angles=angles,
res=cfg["res"],
nm=cfg["nm"],
lD=0,
padfac=2.1,
save_memory=True)
# RI computation
nR = odt.odt_to_ri(fR, cfg["res"], cfg["nm"])
# Plotting
fig, axes = plt.subplots(2, 3, figsize=(8, 5))
axes = np.array(axes).flatten()
# field
axes[0].set_title("Mie field phase")
axes[0].set_xlabel("detector x")
axes[0].set_ylabel("detector y")
axes[0].imshow(np.angle(Ex), cmap="coolwarm")
axes[1].set_title("Mie field amplitude")
axes[1].set_xlabel("detector x")
axes[1].set_ylabel("detector y")
axes[1].imshow(np.abs(Ex), cmap="gray")
# line plot
axes[2].set_title("line plots")
axes[2].set_xlabel("distance [px]")
axes[2].set_ylabel("real refractive index")
center = int(cfg["size"] / 2)
x = np.arange(cfg["size"]) - center
axes[2].plot(x, nR[:, center, center].real, label="x")
axes[2].plot(x, nR[center, center, :].real, label="z")
axes[2].plot(x, nR[center, :, center].real, label="y")
axes[2].legend(loc=4)
axes[2].set_xlim((-center, center))
dn = abs(cfg["nsph"] - cfg["nm"])
axes[2].set_ylim((cfg["nm"] - dn / 10, cfg["nsph"] + dn))
axes[2].ticklabel_format(useOffset=False)
# cross sections
axes[3].set_title("RI reconstruction\nsection at x=0")
axes[3].set_xlabel("z")
axes[3].set_ylabel("y")
axes[3].imshow(nR[center, :, :].real)
axes[4].set_title("RI reconstruction\nsection at y=0")
axes[4].set_xlabel("x")
axes[4].set_ylabel("z")
axes[4].imshow(nR[:, center, :].real)
axes[5].set_title("RI reconstruction\nsection at z=0")
axes[5].set_xlabel("y")
axes[5].set_ylabel("x")
axes[5].imshow(nR[:, :, center].real)
plt.tight_layout()
plt.show()
| bsd-3-clause | pbrunet/pythran | doc/papers/sc2013/cython.py | 5 | 1604 |
#!/usr/bin/env python
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import math
with open("cython.dat") as fd:
    r = list()
    for line in fd:
        r.append(list(filter(None, line.split(' '))))
x = np.array(
[
[float(_[1]) for _ in r],
[float(_[2]) for _ in r],
[float(_[3]) for _ in r],
[float(_[4]) for _ in r],
]
)
y = np.zeros(x.shape)
for i,j in enumerate(x):
y[i] = j/x[2]
for i in range(y.shape[0]):
    for j in range(x.shape[1]):
if math.isinf(y[i][j]):
y[i][j] = x[i][j]/x[0][j]
if math.isnan(y[i][j]):
y[i][j] = 0
fig = plt.figure(1, figsize=(8,4))
hatches = [ ' ' , 'x', ' ' , 'x' , '+' , 'x' , 'o' , 'O' , '.' , '*' ]
colors = [ 'grey' , 'grey', 'w', 'w' ]
p = [0]*len(x)
for i,j in enumerate(y):
p[i] = plt.bar(range(i,y.size+len(y)+len(y)-1, len(y) +1), j, hatch=hatches[i], color=colors[i])
#plt.xlabel('Comparison between cython and pythran')
plt.xticks(range(2,y.size+len(y)+1, len(y) +1), [_[0] for _ in r ] )
plt.ylabel('normalized execution time')
#plt.title(r'$comparison between cython and pythran with/without parallelism$')
plt.grid(True, axis='y')
plt.legend( (p[0][0], p[1][0], p[2][0], p[3][0]), ('Pythran', 'Pythran + OMP', 'Cython', 'Cython + OMP') , bbox_to_anchor=(0., 1.02, 1., .102), loc=2, ncol=2, mode="expand", borderaxespad=0.)
plt.savefig('cython.pdf',format='pdf')
| bsd-3-clause | vorwerkc/pymatgen | pymatgen/util/plotting.py | 5 | 21612 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Utilities for generating nicer plots.
"""
import math
import numpy as np
from pymatgen.core.periodic_table import Element
def pretty_plot(width=8, height=None, plt=None, dpi=None, color_cycle=("qualitative", "Set1_9")):
"""
Provides a publication quality plot, with nice defaults for font sizes etc.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made to an
existing plot. Otherwise, a new plot will be created.
dpi (int): Sets dot per inch for figure. Defaults to 300.
color_cycle (tuple): Set the color cycle for new plots to one of the
color sets in palettable. Defaults to a qualitative Set1_9.
Returns:
Matplotlib plot object with properly sized fonts.
"""
ticksize = int(width * 2.5)
golden_ratio = (math.sqrt(5) - 1) / 2
if not height:
height = int(width * golden_ratio)
if plt is None:
import importlib
import matplotlib.pyplot as plt
mod = importlib.import_module("palettable.colorbrewer.%s" % color_cycle[0])
colors = getattr(mod, color_cycle[1]).mpl_colors
from cycler import cycler
plt.figure(figsize=(width, height), facecolor="w", dpi=dpi)
ax = plt.gca()
ax.set_prop_cycle(cycler("color", colors))
else:
fig = plt.gcf()
fig.set_size_inches(width, height)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
ax = plt.gca()
ax.set_title(ax.get_title(), size=width * 4)
labelsize = int(width * 3)
ax.set_xlabel(ax.get_xlabel(), size=labelsize)
ax.set_ylabel(ax.get_ylabel(), size=labelsize)
return plt
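# Editor's note: the following is an illustrative usage sketch, not part of the
# original module; the helper name _example_pretty_plot is hypothetical.
def _example_pretty_plot():
    """Minimal sketch: create a styled figure and draw a labelled line."""
    plt = pretty_plot(width=6)
    plt.plot([0, 1, 2, 3], [0, 1, 4, 9], label="y = x^2")
    plt.legend()
    return plt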
def pretty_plot_two_axis(
x, y1, y2, xlabel=None, y1label=None, y2label=None, width=8, height=None, dpi=300, **plot_kwargs
):
"""
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
plot_kwargs: Passthrough kwargs to matplotlib's plot method. E.g.,
linewidth, etc.
Returns:
matplotlib.pyplot
"""
# pylint: disable=E1101
import palettable.colorbrewer.diverging
colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
c1 = colors[0]
c2 = colors[-1]
golden_ratio = (math.sqrt(5) - 1) / 2
if not height:
height = int(width * golden_ratio)
import matplotlib.pyplot as plt
width = 12
labelsize = int(width * 3)
ticksize = int(width * 2.5)
styles = ["-", "--", "-.", "."]
fig, ax1 = plt.subplots()
fig.set_size_inches((width, height))
if dpi:
fig.set_dpi(dpi)
if isinstance(y1, dict):
for i, (k, v) in enumerate(y1.items()):
ax1.plot(x, v, c=c1, marker="s", ls=styles[i % len(styles)], label=k, **plot_kwargs)
ax1.legend(fontsize=labelsize)
else:
ax1.plot(x, y1, c=c1, marker="s", ls="-", **plot_kwargs)
if xlabel:
ax1.set_xlabel(xlabel, fontsize=labelsize)
if y1label:
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)
ax1.tick_params("x", labelsize=ticksize)
ax1.tick_params("y", colors=c1, labelsize=ticksize)
ax2 = ax1.twinx()
if isinstance(y2, dict):
for i, (k, v) in enumerate(y2.items()):
ax2.plot(x, v, c=c2, marker="o", ls=styles[i % len(styles)], label=k)
ax2.legend(fontsize=labelsize)
else:
ax2.plot(x, y2, c=c2, marker="o", ls="-")
if y2label:
# Make the y-axis label, ticks and tick labels match the line color.
ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)
ax2.tick_params("y", colors=c2, labelsize=ticksize)
return plt
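# Editor's note: illustrative usage sketch only (not in the original source); the data
# and the helper name _example_pretty_plot_two_axis are made up for demonstration.
def _example_pretty_plot_two_axis():
    """Minimal sketch: plot two series against separate y-axes."""
    x = list(range(10))
    y1 = {"squares": [i ** 2 for i in x]}
    y2 = {"roots": [i ** 0.5 for i in x]}
    return pretty_plot_two_axis(x, y1, y2, xlabel="x", y1label="x^2", y2label="sqrt(x)")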
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):
r"""
Convenience method to plot data with trend lines based on polynomial fit.
Args:
x: Sequence of x data.
y: Sequence of y data.
deg (int): Degree of polynomial. Defaults to 1.
xlabel (str): Label for x-axis.
ylabel (str): Label for y-axis.
\\*\\*kwargs: Keyword args passed to pretty_plot.
Returns:
matplotlib.pyplot object.
"""
plt = pretty_plot(**kwargs)
pp = np.polyfit(x, y, deg)
xp = np.linspace(min(x), max(x), 200)
plt.plot(xp, np.polyval(pp, xp), "k--", x, y, "o")
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
return plt
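# Editor's note: illustrative usage sketch only (not in the original source); the data
# points and the helper name _example_pretty_polyfit_plot are hypothetical.
def _example_pretty_polyfit_plot():
    """Minimal sketch: scatter a few points and overlay a linear trend line."""
    x = [1, 2, 3, 4, 5]
    y = [2.1, 3.9, 6.2, 8.1, 9.8]
    return pretty_polyfit_plot(x, y, deg=1, xlabel="x", ylabel="y")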
def periodic_table_heatmap(
elemental_data,
cbar_label="",
cbar_label_size=14,
show_plot=False,
cmap="YlOrRd",
cmap_range=None,
blank_color="grey",
value_format=None,
max_row=9,
):
"""
A static method that generates a heat map overlayed on a periodic table.
Args:
elemental_data (dict): A dictionary with the element as a key and a
value assigned to it, e.g. surface energy and frequency, etc.
            Elements missing in elemental_data will be grey by default
            in the final table, e.g. elemental_data={"Fe": 4.2, "O": 5.0}.
cbar_label (string): Label of the colorbar. Default is "".
cbar_label_size (float): Font size for the colorbar label. Default is 14.
cmap_range (tuple): Minimum and maximum value of the colormap scale.
            If None, the colormap will automatically scale to the range of the
data.
show_plot (bool): Whether to show the heatmap. Default is False.
value_format (str): Formatting string to show values. If None, no value
is shown. Example: "%.4f" shows float to four decimals.
cmap (string): Color scheme of the heatmap. Default is 'YlOrRd'.
Refer to the matplotlib documentation for other options.
blank_color (string): Color assigned for the missing elements in
elemental_data. Default is "grey".
max_row (integer): Maximum number of rows of the periodic table to be
shown. Default is 9, which means the periodic table heat map covers
the first 9 rows of elements.
"""
    # Convert the elemental data into a numpy array for plotting.
if cmap_range is not None:
max_val = cmap_range[1]
min_val = cmap_range[0]
else:
max_val = max(elemental_data.values())
min_val = min(elemental_data.values())
max_row = min(max_row, 9)
if max_row <= 0:
raise ValueError("The input argument 'max_row' must be positive!")
value_table = np.empty((max_row, 18)) * np.nan
blank_value = min_val - 0.01
for el in Element:
if el.row > max_row:
continue
value = elemental_data.get(el.symbol, blank_value)
value_table[el.row - 1, el.group - 1] = value
# Initialize the plt object
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.gcf().set_size_inches(12, 8)
# We set nan type values to masked values (ie blank spaces)
data_mask = np.ma.masked_invalid(value_table.tolist())
heatmap = ax.pcolor(
data_mask,
cmap=cmap,
edgecolors="w",
linewidths=1,
vmin=min_val - 0.001,
vmax=max_val + 0.001,
)
cbar = fig.colorbar(heatmap)
# Grey out missing elements in input data
cbar.cmap.set_under(blank_color)
# Set the colorbar label and tick marks
cbar.set_label(cbar_label, rotation=270, labelpad=25, size=cbar_label_size)
cbar.ax.tick_params(labelsize=cbar_label_size)
# Refine and make the table look nice
ax.axis("off")
ax.invert_yaxis()
# Label each block with corresponding element and value
for i, row in enumerate(value_table):
for j, el in enumerate(row):
if not np.isnan(el):
symbol = Element.from_row_and_group(i + 1, j + 1).symbol
plt.text(
j + 0.5,
i + 0.25,
symbol,
horizontalalignment="center",
verticalalignment="center",
fontsize=14,
)
if el != blank_value and value_format is not None:
plt.text(
j + 0.5,
i + 0.5,
value_format % el,
horizontalalignment="center",
verticalalignment="center",
fontsize=10,
)
plt.tight_layout()
if show_plot:
plt.show()
return plt
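# Editor's note: illustrative usage sketch only (not in the original source). It uses
# the elemental_data example from the docstring above; _example_periodic_table_heatmap
# is a hypothetical helper name.
def _example_periodic_table_heatmap():
    """Minimal sketch: heat map for two elements, all others greyed out."""
    return periodic_table_heatmap({"Fe": 4.2, "O": 5.0}, cbar_label="Surface energy",
                                  value_format="%.1f")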
def format_formula(formula):
"""
Converts str of chemical formula into
latex format for labelling purposes
Args:
formula (str): Chemical formula
"""
formatted_formula = ""
number_format = ""
for i, s in enumerate(formula):
if s.isdigit():
if not number_format:
number_format = "_{"
number_format += s
if i == len(formula) - 1:
number_format += "}"
formatted_formula += number_format
else:
if number_format:
number_format += "}"
formatted_formula += number_format
number_format = ""
formatted_formula += s
return r"$%s$" % (formatted_formula)
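# Editor's note: illustrative sketch only (not in the original source); the helper name
# _example_format_formula is hypothetical.
def _example_format_formula():
    """Minimal sketch: digits in a formula are wrapped as LaTeX subscripts."""
    assert format_formula("Fe2O3") == r"$Fe_{2}O_{3}$"
    assert format_formula("LiFePO4") == r"$LiFePO_{4}$"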
def van_arkel_triangle(list_of_materials, annotate=True):
"""
A static method that generates a binary van Arkel-Ketelaar triangle to
quantify the ionic, metallic and covalent character of a compound
by plotting the electronegativity difference (y) vs average (x).
See:
A.E. van Arkel, Molecules and Crystals in Inorganic Chemistry,
Interscience, New York (1956)
and
J.A.A Ketelaar, Chemical Constitution (2nd edn.), An Introduction
to the Theory of the Chemical Bond, Elsevier, New York (1958)
Args:
list_of_materials (list): A list of computed entries of binary
materials or a list of lists containing two elements (str).
        annotate (bool): Whether or not to label the points on the
triangle with reduced formula (if list of entries) or pair
of elements (if list of list of str).
"""
# F-Fr has the largest X difference. We set this
# as our top corner of the triangle (most ionic)
pt1 = np.array([(Element("F").X + Element("Fr").X) / 2, abs(Element("F").X - Element("Fr").X)])
# Cs-Fr has the lowest average X. We set this as our
# bottom left corner of the triangle (most metallic)
pt2 = np.array(
[
(Element("Cs").X + Element("Fr").X) / 2,
abs(Element("Cs").X - Element("Fr").X),
]
)
# O-F has the highest average X. We set this as our
# bottom right corner of the triangle (most covalent)
pt3 = np.array([(Element("O").X + Element("F").X) / 2, abs(Element("O").X - Element("F").X)])
# get the parameters for the lines of the triangle
d = np.array(pt1) - np.array(pt2)
slope1 = d[1] / d[0]
b1 = pt1[1] - slope1 * pt1[0]
d = pt3 - pt1
slope2 = d[1] / d[0]
b2 = pt3[1] - slope2 * pt3[0]
# Initialize the plt object
import matplotlib.pyplot as plt
# set labels and appropriate limits for plot
plt.xlim(pt2[0] - 0.45, -b2 / slope2 + 0.45)
plt.ylim(-0.45, pt1[1] + 0.45)
plt.annotate("Ionic", xy=[pt1[0] - 0.3, pt1[1] + 0.05], fontsize=20)
plt.annotate("Covalent", xy=[-b2 / slope2 - 0.65, -0.4], fontsize=20)
plt.annotate("Metallic", xy=[pt2[0] - 0.4, -0.4], fontsize=20)
plt.xlabel(r"$\frac{\chi_{A}+\chi_{B}}{2}$", fontsize=25)
plt.ylabel(r"$|\chi_{A}-\chi_{B}|$", fontsize=25)
# Set the lines of the triangle
chi_list = [el.X for el in Element]
plt.plot(
[min(chi_list), pt1[0]],
[slope1 * min(chi_list) + b1, pt1[1]],
"k-",
linewidth=3,
)
plt.plot([pt1[0], -b2 / slope2], [pt1[1], 0], "k-", linewidth=3)
plt.plot([min(chi_list), -b2 / slope2], [0, 0], "k-", linewidth=3)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
    # Shade with appropriate colors corresponding to ionic, metallic and covalent
ax = plt.gca()
# ionic filling
ax.fill_between(
[min(chi_list), pt1[0]],
[slope1 * min(chi_list) + b1, pt1[1]],
facecolor=[1, 1, 0],
zorder=-5,
edgecolor=[1, 1, 0],
)
ax.fill_between(
[pt1[0], -b2 / slope2],
[pt1[1], slope2 * min(chi_list) - b1],
facecolor=[1, 1, 0],
zorder=-5,
edgecolor=[1, 1, 0],
)
# metal filling
XPt = Element("Pt").X
ax.fill_between(
[min(chi_list), (XPt + min(chi_list)) / 2],
[0, slope1 * (XPt + min(chi_list)) / 2 + b1],
facecolor=[1, 0, 0],
zorder=-3,
alpha=0.8,
)
ax.fill_between(
[(XPt + min(chi_list)) / 2, XPt],
[slope1 * ((XPt + min(chi_list)) / 2) + b1, 0],
facecolor=[1, 0, 0],
zorder=-3,
alpha=0.8,
)
# covalent filling
ax.fill_between(
[(XPt + min(chi_list)) / 2, ((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2],
[0, slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2],
facecolor=[0, 1, 0],
zorder=-4,
alpha=0.8,
)
ax.fill_between(
[((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2, -b2 / slope2],
[slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2, 0],
facecolor=[0, 1, 0],
zorder=-4,
alpha=0.8,
)
# Label the triangle with datapoints
for entry in list_of_materials:
if type(entry).__name__ not in ["ComputedEntry", "ComputedStructureEntry"]:
X_pair = [Element(el).X for el in entry]
formatted_formula = "%s-%s" % tuple(entry)
else:
X_pair = [Element(el).X for el in entry.composition.as_dict().keys()]
formatted_formula = format_formula(entry.composition.reduced_formula)
plt.scatter(np.mean(X_pair), abs(X_pair[0] - X_pair[1]), c="b", s=100)
if annotate:
plt.annotate(
formatted_formula,
fontsize=15,
xy=[np.mean(X_pair) + 0.005, abs(X_pair[0] - X_pair[1])],
)
plt.tight_layout()
return plt
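# Editor's note: illustrative usage sketch only (not in the original source); the element
# pairs and the helper name _example_van_arkel_triangle are chosen for demonstration.
def _example_van_arkel_triangle():
    """Minimal sketch: place an ionic and a covalent pair on the triangle."""
    return van_arkel_triangle([["Na", "Cl"], ["Si", "O"]], annotate=True)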
def get_ax_fig_plt(ax=None, **kwargs):
"""
Helper function used in plot functions supporting an optional Axes argument.
If ax is None, we build the `matplotlib` figure and create the Axes else
we return the current active figure.
Args:
        kwargs: keyword arguments are passed to plt.figure if ax is None.
Returns:
ax: :class:`Axes` object
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure(**kwargs)
ax = fig.add_subplot(1, 1, 1)
else:
fig = plt.gcf()
return ax, fig, plt
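# Editor's note: illustrative usage sketch only (not in the original source); the helper
# name _example_get_ax_fig_plt is hypothetical.
def _example_get_ax_fig_plt():
    """Minimal sketch: obtain a fresh Axes, draw on it and return the figure."""
    ax, fig, plt = get_ax_fig_plt(ax=None, figsize=(4, 3))
    ax.plot([0, 1], [0, 1])
    return fig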
def get_ax3d_fig_plt(ax=None, **kwargs):
"""
Helper function used in plot functions supporting an optional Axes3D
argument. If ax is None, we build the `matplotlib` figure and create the
Axes3D else we return the current active figure.
Args:
        kwargs: keyword arguments are passed to plt.figure if ax is None.
Returns:
ax: :class:`Axes` object
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
if ax is None:
fig = plt.figure(**kwargs)
ax = axes3d.Axes3D(fig)
else:
fig = plt.gcf()
return ax, fig, plt
def get_axarray_fig_plt(
ax_array, nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None, **fig_kw
):
"""
Helper function used in plot functions that accept an optional array of Axes
as argument. If ax_array is None, we build the `matplotlib` figure and
create the array of Axes by calling plt.subplots else we return the
current active figure.
Returns:
ax: Array of :class:`Axes` objects
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax_array is None:
fig, ax_array = plt.subplots(
nrows=nrows,
ncols=ncols,
sharex=sharex,
sharey=sharey,
squeeze=squeeze,
subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw,
**fig_kw,
)
else:
fig = plt.gcf()
ax_array = np.reshape(np.array(ax_array), (nrows, ncols))
if squeeze:
if ax_array.size == 1:
ax_array = ax_array[0]
elif any(s == 1 for s in ax_array.shape):
ax_array = ax_array.ravel()
return ax_array, fig, plt
def add_fig_kwargs(func):
"""
Decorator that adds keyword arguments for functions returning matplotlib
figures.
The function should return either a matplotlib figure or None to signal
some sort of error/unexpected event.
See doc string below for the list of supported options.
"""
from functools import wraps
@wraps(func)
def wrapper(*args, **kwargs):
# pop the kwds used by the decorator.
title = kwargs.pop("title", None)
size_kwargs = kwargs.pop("size_kwargs", None)
show = kwargs.pop("show", True)
savefig = kwargs.pop("savefig", None)
tight_layout = kwargs.pop("tight_layout", False)
ax_grid = kwargs.pop("ax_grid", None)
ax_annotate = kwargs.pop("ax_annotate", None)
fig_close = kwargs.pop("fig_close", False)
# Call func and return immediately if None is returned.
fig = func(*args, **kwargs)
if fig is None:
return fig
# Operate on matplotlib figure.
if title is not None:
fig.suptitle(title)
if size_kwargs is not None:
fig.set_size_inches(size_kwargs.pop("w"), size_kwargs.pop("h"), **size_kwargs)
if ax_grid is not None:
for ax in fig.axes:
ax.grid(bool(ax_grid))
if ax_annotate:
from string import ascii_letters
tags = ascii_letters
if len(fig.axes) > len(tags):
                tags = (1 + len(fig.axes) // len(ascii_letters)) * ascii_letters
for ax, tag in zip(fig.axes, tags):
ax.annotate("(%s)" % tag, xy=(0.05, 0.95), xycoords="axes fraction")
if tight_layout:
try:
fig.tight_layout()
except Exception as exc:
# For some unknown reason, this problem shows up only on travis.
# https://stackoverflow.com/questions/22708888/valueerror-when-using-matplotlib-tight-layout
print("Ignoring Exception raised by fig.tight_layout\n", str(exc))
if savefig:
fig.savefig(savefig)
import matplotlib.pyplot as plt
if show:
plt.show()
if fig_close:
plt.close(fig=fig)
return fig
# Add docstring to the decorated method.
s = (
"\n\n"
+ """\
Keyword arguments controlling the display of the figure:
================ ====================================================
kwargs Meaning
================ ====================================================
title Title of the plot (Default: None).
show True to show the figure (default: True).
savefig "abc.png" or "abc.eps" to save the figure to a file.
size_kwargs Dictionary with options passed to fig.set_size_inches
e.g. size_kwargs=dict(w=3, h=4)
tight_layout True to call fig.tight_layout (default: False)
ax_grid True (False) to add (remove) grid from all axes in fig.
Default: None i.e. fig is left unchanged.
ax_annotate Add labels to subplots e.g. (a), (b).
Default: False
fig_close Close figure. Default: False.
================ ====================================================
"""
)
if wrapper.__doc__ is not None:
# Add s at the end of the docstring.
wrapper.__doc__ += "\n" + s
else:
# Use s
wrapper.__doc__ = s
return wrapper
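# Editor's note: illustrative sketch only (not in the original source); the function name
# _example_decorated_plot is hypothetical. It shows how a plotting function can pick up
# the title/show/savefig keywords added by the decorator above, e.g.
# _example_decorated_plot(title="demo", show=False, tight_layout=True).
@add_fig_kwargs
def _example_decorated_plot(ax=None, **kwargs):
    """Minimal sketch: a decorated plotter that returns a matplotlib figure."""
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    ax.plot([0, 1, 2], [2, 1, 0])
    return fig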
| mit | PanDAWMS/panda-jedi | pandajedi/jedibrokerage/AtlasAnalJobBroker.py | 1 | 107918 |
import re
import sys
import copy
import random
import datetime
from six import iteritems
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore.SiteCandidate import SiteCandidate
from pandajedi.jedicore import Interaction
from pandajedi.jedicore import JediCoreUtils
from .JobBrokerBase import JobBrokerBase
from . import AtlasBrokerUtils
from pandaserver.dataservice.DataServiceUtils import select_scope
from pandaserver.taskbuffer import JobUtils
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
APP = 'jedi'
COMPONENT = 'jobbroker'
VO = 'atlas'
# brokerage for ATLAS analysis
class AtlasAnalJobBroker(JobBrokerBase):
# constructor
def __init__(self, ddmIF, taskBufferIF):
JobBrokerBase.__init__(self, ddmIF, taskBufferIF)
self.dataSiteMap = {}
self.summaryList = None
# wrapper for return
def sendLogMessage(self, tmpLog):
# send info to logger
#tmpLog.bulkSendMsg('analy_brokerage')
tmpLog.debug('sent')
# make summary
def add_summary_message(self, old_list, new_list, message):
if len(old_list) != len(new_list):
red = int(((len(old_list) - len(new_list)) * 100) / len(old_list))
self.summaryList.append('{:>5} -> {:>3} candidates, {:>3}% cut : {}'.format(len(old_list),
len(new_list),
red, message))
# dump summary
def dump_summary(self, tmp_log, final_candidates=None):
tmp_log.info('')
for m in self.summaryList:
tmp_log.info(m)
if not final_candidates:
final_candidates = []
tmp_log.info('the number of final candidates: {}'.format(len(final_candidates)))
tmp_log.info('')
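    # Note (editor's addition): add_summary_message appends lines of the form
    #   "  120 ->  45 candidates,  62% cut : input data check"
    # only when a selection step removed candidates; dump_summary then prints the
    # accumulated list through the task logger.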
# main
def doBrokerage(self, taskSpec, cloudName, inputChunk, taskParamMap):
# make logger
tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),
monToken='<jediTaskID={0} {1}>'.format(taskSpec.jediTaskID,
datetime.datetime.utcnow().isoformat('/')))
tmpLog.debug('start')
# return for failure
retFatal = self.SC_FATAL,inputChunk
retTmpError = self.SC_FAILED,inputChunk
# new maxwdir
newMaxwdir = {}
# get primary site candidates
sitePreAssigned = False
siteListPreAssigned = False
excludeList = []
includeList = None
scanSiteList = []
# problematic sites
problematic_sites_dict = {}
# get list of site access
siteAccessList = self.taskBufferIF.listSiteAccess(None, taskSpec.userName)
siteAccessMap = {}
for tmpSiteName,tmpAccess in siteAccessList:
siteAccessMap[tmpSiteName] = tmpAccess
# disable VP for merging and forceStaged
if inputChunk.isMerging or taskSpec.avoid_vp():
useVP = False
else:
useVP = True
# get workQueue
workQueue = self.taskBufferIF.getWorkQueueMap().getQueueWithIDGshare(taskSpec.workQueue_ID, taskSpec.gshare)
# site limitation
if taskSpec.useLimitedSites():
if 'excludedSite' in taskParamMap:
excludeList = taskParamMap['excludedSite']
# str to list for task retry
try:
if not isinstance(excludeList, list):
excludeList = excludeList.split(',')
except Exception:
pass
if 'includedSite' in taskParamMap:
includeList = taskParamMap['includedSite']
# str to list for task retry
if includeList == '':
includeList = None
try:
if not isinstance(includeList, list):
includeList = includeList.split(',')
siteListPreAssigned = True
except Exception:
pass
# loop over all sites
for siteName,tmpSiteSpec in iteritems(self.siteMapper.siteSpecList):
if tmpSiteSpec.type == 'analysis' or tmpSiteSpec.is_grandly_unified():
scanSiteList.append(siteName)
# preassigned
preassignedSite = taskSpec.site
if preassignedSite not in ['',None]:
# site is pre-assigned
if not self.siteMapper.checkSite(preassignedSite):
# check ddm for unknown site
includeList = []
for tmpSiteName in self.get_unified_sites(scanSiteList):
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
scope_input, scope_output = select_scope(tmpSiteSpec, JobUtils.ANALY_PS, JobUtils.ANALY_PS)
if scope_input in tmpSiteSpec.ddm_endpoints_input and \
preassignedSite in tmpSiteSpec.ddm_endpoints_input[scope_input].all:
includeList.append(tmpSiteName)
if not includeList:
includeList = None
tmpLog.info('site={0} is ignored since unknown'.format(preassignedSite))
else:
tmpLog.info('site={0} is converted to {1}'.format(preassignedSite,
','.join(includeList)))
preassignedSite = None
else:
tmpLog.info('site={0} is pre-assigned'.format(preassignedSite))
sitePreAssigned = True
if preassignedSite not in scanSiteList:
scanSiteList.append(preassignedSite)
tmpLog.info('initial {0} candidates'.format(len(scanSiteList)))
# allowed remote access protocol
allowedRemoteProtocol = 'fax'
# MP
if taskSpec.coreCount is not None and taskSpec.coreCount > 1:
# use MCORE only
useMP = 'only'
elif taskSpec.coreCount == 0:
# use MCORE and normal
useMP = 'any'
else:
# not use MCORE
useMP = 'unuse'
# get statistics of failures
timeWindowForFC = self.taskBufferIF.getConfigValue('anal_jobbroker', 'TW_DONE_JOB_STAT', 'jedi', taskSpec.vo)
if timeWindowForFC is None:
timeWindowForFC = 6
failureCounts = self.taskBufferIF.getFailureCountsForTask_JEDI(taskSpec.jediTaskID, timeWindowForFC)
# two loops with/without data locality check
scanSiteLists = [(copy.copy(scanSiteList), True)]
if len(inputChunk.getDatasets()) > 0:
nRealDS = 0
for datasetSpec in inputChunk.getDatasets():
if not datasetSpec.isPseudo():
nRealDS += 1
if taskSpec.taskPriority >= 2000:
if inputChunk.isMerging:
scanSiteLists.append((copy.copy(scanSiteList), False))
else:
scanSiteLists = [(copy.copy(scanSiteList), False)]
elif taskSpec.taskPriority > 1000 or nRealDS > 1:
scanSiteLists.append((copy.copy(scanSiteList), False))
retVal = None
checkDataLocality = False
scanSiteWoVP = []
avoidVP = False
summaryList = []
for scanSiteList, checkDataLocality in scanSiteLists:
useUnionLocality = False
self.summaryList = []
self.summaryList.append('===== Brokerage summary =====')
self.summaryList.append('data locality check: {}'.format(checkDataLocality))
self.summaryList.append('the number of initial candidates: {}'.format(len(scanSiteList)))
if checkDataLocality:
tmpLog.debug('!!! look for candidates WITH data locality check')
else:
tmpLog.debug('!!! look for candidates WITHOUT data locality check')
######################################
# selection for data availability
hasDDS = False
dataWeight = {}
ddsList = set()
remoteSourceList = {}
for datasetSpec in inputChunk.getDatasets():
datasetSpec.reset_distributed()
if inputChunk.getDatasets() != [] and checkDataLocality:
oldScanSiteList = copy.copy(scanSiteList)
oldScanUnifiedSiteList = self.get_unified_sites(oldScanSiteList)
for datasetSpec in inputChunk.getDatasets():
datasetName = datasetSpec.datasetName
if datasetName not in self.dataSiteMap:
# get the list of sites where data is available
tmpLog.debug('getting the list of sites where {0} is available'.format(datasetName))
tmpSt,tmpRet = AtlasBrokerUtils.getAnalSitesWithData(self.get_unified_sites(scanSiteList),
self.siteMapper,
self.ddmIF,datasetName)
if tmpSt in [Interaction.JEDITemporaryError,Interaction.JEDITimeoutError]:
tmpLog.error('temporary failed to get the list of sites where data is available, since %s' % tmpRet)
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
if tmpSt == Interaction.JEDIFatalError:
tmpLog.error('fatal error when getting the list of sites where data is available, since %s' % tmpRet)
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retFatal
# append
self.dataSiteMap[datasetName] = tmpRet
if datasetName.startswith('ddo'):
tmpLog.debug(' {0} sites'.format(len(tmpRet)))
else:
tmpLog.debug(' {0} sites : {1}'.format(len(tmpRet),str(tmpRet)))
# check if distributed
if tmpRet != {}:
isDistributed = True
for tmpMap in tmpRet.values():
for tmpVal in tmpMap.values():
if tmpVal['state'] == 'complete':
isDistributed = False
break
if not isDistributed:
break
if isDistributed or datasetName.endswith('/'):
# check if really distributed
isDistributed = self.ddmIF.isDistributedDataset(datasetName)
if isDistributed or datasetName.endswith('/'):
hasDDS = True
datasetSpec.setDistributed()
tmpLog.debug(' {0} is distributed'.format(datasetName))
ddsList.add(datasetName)
# disable VP since distributed datasets triggers transfers
useVP = False
avoidVP = True
# check if the data is available at somewhere
if self.dataSiteMap[datasetName] == {}:
for tmpSiteName in scanSiteList:
#tmpLog.info(' skip site={0} data is unavailable criteria=-input'.format(tmpSiteName))
pass
tmpLog.error('{0} is unavailable at any site'.format(datasetName))
retVal = retFatal
continue
# get the list of sites where data is available
scanSiteList = None
scanSiteListOnDisk = None
scanSiteListUnion = None
scanSiteListOnDiskUnion = None
scanSiteWoVpUnion = None
normFactor = 0
for datasetName,tmpDataSite in iteritems(self.dataSiteMap):
normFactor += 1
useIncomplete = datasetName in ddsList
# get sites where replica is available
tmpSiteList = AtlasBrokerUtils.getAnalSitesWithDataDisk(tmpDataSite, includeTape=True,
use_incomplete=useIncomplete)
tmpDiskSiteList = AtlasBrokerUtils.getAnalSitesWithDataDisk(tmpDataSite,includeTape=False,
use_vp=useVP,
use_incomplete=useIncomplete)
tmpNonVpSiteList = AtlasBrokerUtils.getAnalSitesWithDataDisk(tmpDataSite, includeTape=True,
use_vp=False,
use_incomplete=useIncomplete)
# get sites which can remotely access source sites
if inputChunk.isMerging or taskSpec.useLocalIO():
# disable remote access for merging
tmpSatelliteSites = {}
elif (not sitePreAssigned) or (sitePreAssigned and preassignedSite not in tmpSiteList):
tmpSatelliteSites = AtlasBrokerUtils.getSatelliteSites(tmpDiskSiteList,
self.taskBufferIF,
self.siteMapper,nSites=50,
protocol=allowedRemoteProtocol)
else:
tmpSatelliteSites = {}
# make weight map for local
for tmpSiteName in tmpSiteList:
if tmpSiteName not in dataWeight:
dataWeight[tmpSiteName] = 0
# give more weight to disk
if tmpSiteName in tmpDiskSiteList:
dataWeight[tmpSiteName] += 1
else:
dataWeight[tmpSiteName] += 0.001
# make weight map for remote
for tmpSiteName,tmpWeightSrcMap in iteritems(tmpSatelliteSites):
# skip since local data is available
if tmpSiteName in tmpSiteList:
continue
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# negative weight for remote access
wRemote = 50.0
if tmpSiteSpec.wansinklimit not in [0,None]:
wRemote /= float(tmpSiteSpec.wansinklimit)
# sum weight
if tmpSiteName not in dataWeight:
dataWeight[tmpSiteName] = float(tmpWeightSrcMap['weight'])/wRemote
else:
dataWeight[tmpSiteName] += float(tmpWeightSrcMap['weight'])/wRemote
# make remote source list
if tmpSiteName not in remoteSourceList:
remoteSourceList[tmpSiteName] = {}
remoteSourceList[tmpSiteName][datasetName] = tmpWeightSrcMap['source']
# first list
if scanSiteList is None:
scanSiteList = []
for tmpSiteName in tmpSiteList + list(tmpSatelliteSites.keys()):
if tmpSiteName not in oldScanUnifiedSiteList:
continue
if tmpSiteName not in scanSiteList:
scanSiteList.append(tmpSiteName)
scanSiteListOnDisk = set()
for tmpSiteName in tmpDiskSiteList + list(tmpSatelliteSites.keys()):
if tmpSiteName not in oldScanUnifiedSiteList:
continue
scanSiteListOnDisk.add(tmpSiteName)
scanSiteWoVP = tmpNonVpSiteList
scanSiteListUnion = set(scanSiteList)
scanSiteListOnDiskUnion = set(scanSiteListOnDisk)
scanSiteWoVpUnion = set(scanSiteWoVP)
continue
# pickup sites which have all data
newScanList = []
for tmpSiteName in tmpSiteList + list(tmpSatelliteSites.keys()):
if tmpSiteName in scanSiteList and tmpSiteName not in newScanList:
newScanList.append(tmpSiteName)
scanSiteListUnion.add(tmpSiteName)
scanSiteList = newScanList
tmpLog.debug('{0} is available at {1} sites'.format(datasetName,len(scanSiteList)))
# pickup sites which have all data on DISK
newScanListOnDisk = set()
for tmpSiteName in tmpDiskSiteList + list(tmpSatelliteSites.keys()):
if tmpSiteName in scanSiteListOnDisk:
newScanListOnDisk.add(tmpSiteName)
scanSiteListOnDiskUnion.add(tmpSiteName)
scanSiteListOnDisk = newScanListOnDisk
# get common elements
scanSiteWoVP = list(set(scanSiteWoVP).intersection(tmpNonVpSiteList))
scanSiteWoVpUnion = scanSiteWoVpUnion.union(tmpNonVpSiteList)
tmpLog.debug('{0} is available at {1} sites on DISK'.format(datasetName,len(scanSiteListOnDisk)))
# check for preassigned
if sitePreAssigned:
if preassignedSite not in scanSiteList and preassignedSite not in scanSiteListUnion:
scanSiteList = []
tmpLog.info('data is unavailable locally or remotely at preassigned site {0}'.format(preassignedSite))
elif preassignedSite not in scanSiteList:
scanSiteList = list(scanSiteListUnion)
elif len(scanSiteListOnDisk) > 0:
# use only disk sites
scanSiteList = list(scanSiteListOnDisk)
elif not scanSiteList and scanSiteListUnion:
tmpLog.info('use union list for data locality check since no site has all data')
if scanSiteListOnDiskUnion:
scanSiteList = list(scanSiteListOnDiskUnion)
elif scanSiteListUnion:
scanSiteList = list(scanSiteListUnion)
scanSiteWoVP = list(scanSiteWoVpUnion)
useUnionLocality = True
scanSiteList = self.get_pseudo_sites(scanSiteList, oldScanSiteList)
# dump
for tmpSiteName in oldScanSiteList:
if tmpSiteName not in scanSiteList:
pass
tmpLog.info('{0} candidates have input data'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'input data check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retFatal
continue
######################################
# selection for status
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# skip unified queues
if tmpSiteSpec.is_unified:
continue
# check site status
skipFlag = False
if tmpSiteSpec.status in ['offline']:
skipFlag = True
elif tmpSiteSpec.status in ['brokeroff','test']:
if siteListPreAssigned:
pass
elif not sitePreAssigned:
skipFlag = True
elif preassignedSite not in [tmpSiteName, tmpSiteSpec.get_unified_name()]:
skipFlag = True
if not skipFlag:
newScanSiteList.append(tmpSiteName)
else:
tmpLog.info(' skip site=%s due to status=%s criteria=-status' % (tmpSiteName,tmpSiteSpec.status))
scanSiteList = newScanSiteList
tmpLog.info('{0} candidates passed site status check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'status check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for iointensity limits
# get default disk IO limit from GDP config
max_diskio_per_core_default = self.taskBufferIF.getConfigValue(COMPONENT, 'MAX_DISKIO_DEFAULT', APP, VO)
if not max_diskio_per_core_default:
max_diskio_per_core_default = 10 ** 10
# get the current disk IO usage per site
diskio_percore_usage = self.taskBufferIF.getAvgDiskIO_JEDI()
unified_site_list = self.get_unified_sites(scanSiteList)
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in unified_site_list:
tmp_site_spec = self.siteMapper.getSite(tmpSiteName)
# measured diskIO at queue
diskio_usage_tmp = diskio_percore_usage.get(tmpSiteName, 0)
# figure out queue or default limit
if tmp_site_spec.maxDiskio and tmp_site_spec.maxDiskio > 0:
# there is a limit specified in AGIS
diskio_limit_tmp = tmp_site_spec.maxDiskio
else:
# we need to use the default value from GDP Config
diskio_limit_tmp = max_diskio_per_core_default
# normalize task diskIO by site corecount
diskio_task_tmp = taskSpec.diskIO
if taskSpec.diskIO is not None and taskSpec.coreCount not in [None, 0, 1] \
and tmp_site_spec.coreCount not in [None, 0]:
diskio_task_tmp = taskSpec.diskIO / tmp_site_spec.coreCount
try: # generate a log message parseable by logstash for monitoring
log_msg = 'diskIO measurements: site={0} jediTaskID={1} '.format(tmpSiteName, taskSpec.jediTaskID)
if diskio_task_tmp is not None:
log_msg += 'diskIO_task={:.2f} '.format(diskio_task_tmp)
if diskio_usage_tmp is not None:
log_msg += 'diskIO_site_usage={:.2f} '.format(diskio_usage_tmp)
if diskio_limit_tmp is not None:
log_msg += 'diskIO_site_limit={:.2f} '.format(diskio_limit_tmp)
#tmpLog.info(log_msg)
except Exception:
tmpLog.debug('diskIO measurements: Error generating diskIO message')
# if the task has a diskIO defined, the queue is over the IO limit and the task IO is over the limit
if diskio_task_tmp and diskio_usage_tmp and diskio_limit_tmp \
and diskio_usage_tmp > diskio_limit_tmp and diskio_task_tmp > diskio_limit_tmp:
tmpLog.info(' skip site={0} due to diskIO overload criteria=-diskIO'.format(tmpSiteName))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed diskIO check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'diskIO check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
######################################
# selection for VP
if taskSpec.avoid_vp() or avoidVP or not checkDataLocality:
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
if not tmpSiteSpec.use_vp(JobUtils.ANALY_PS):
newScanSiteList.append(tmpSiteName)
else:
tmpLog.info(' skip site=%s to avoid VP' % tmpSiteName)
scanSiteList = newScanSiteList
tmpLog.info('{0} candidates passed for avoidVP'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'avoid VP check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for MP
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check at the site
if useMP == 'any' or (useMP == 'only' and tmpSiteSpec.coreCount > 1) or \
(useMP =='unuse' and tmpSiteSpec.coreCount in [0,1,None]):
newScanSiteList.append(tmpSiteName)
else:
tmpLog.info(' skip site=%s due to core mismatch cores_site=%s <> cores_task=%s criteria=-cpucore' % \
(tmpSiteName,tmpSiteSpec.coreCount,taskSpec.coreCount))
scanSiteList = newScanSiteList
tmpLog.info('{0} candidates passed for useMP={1}'.format(len(scanSiteList),useMP))
self.add_summary_message(oldScanSiteList, scanSiteList, 'CPU core check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for GPU + architecture
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
jsonCheck = None
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
if tmpSiteSpec.isGPU() and not taskSpec.is_hpo_workflow():
if taskSpec.get_sw_platform() in ['', None]:
tmpLog.info(' skip site={0} since architecture is required for GPU queues'.format(tmpSiteName))
continue
if jsonCheck is None:
jsonCheck = AtlasBrokerUtils.JsonSoftwareCheck(self.siteMapper)
siteListWithCMTCONFIG = [tmpSiteSpec.get_unified_name()]
siteListWithCMTCONFIG, sitesNoJsonCheck = jsonCheck.check(siteListWithCMTCONFIG, None,
None, None,
taskSpec.get_sw_platform(),
False, True)
siteListWithCMTCONFIG += self.taskBufferIF.checkSitesWithRelease(sitesNoJsonCheck,
cmtConfig=taskSpec.get_sw_platform(),
onlyCmtConfig=True)
if len(siteListWithCMTCONFIG) == 0:
tmpLog.info(' skip site={0} since architecture={1} is unavailable'.format(tmpSiteName, taskSpec.get_sw_platform()))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = newScanSiteList
tmpLog.info('{0} candidates passed for architecture check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'architecture check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for closed
if not sitePreAssigned and not inputChunk.isMerging:
oldScanSiteList = copy.copy(scanSiteList)
newScanSiteList = []
for tmpSiteName in self.get_unified_sites(scanSiteList):
if tmpSiteName in failureCounts and 'closed' in failureCounts[tmpSiteName]:
nClosed = failureCounts[tmpSiteName]['closed']
if nClosed > 0:
tmpLog.info(' skip site=%s due to n_closed=%s criteria=-closed' % \
(tmpSiteName, nClosed))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed for closed'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'too many closed check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for release
host_cpu_spec = taskSpec.get_host_cpu_spec()
host_gpu_spec = taskSpec.get_host_gpu_spec()
if not sitePreAssigned and \
(taskSpec.transHome is not None or host_cpu_spec is not None or host_gpu_spec is not None or \
(taskSpec.processingType is not None and taskSpec.processingType.endswith('jedi-cont'))):
jsonCheck = AtlasBrokerUtils.JsonSoftwareCheck(self.siteMapper)
unified_site_list = self.get_unified_sites(scanSiteList)
if taskSpec.transHome is not None:
transHome = taskSpec.transHome
else:
transHome = ''
# remove AnalysisTransforms-
transHome = re.sub('^[^-]+-*','',transHome)
transHome = re.sub('_','-',transHome)
if re.search('rel_\d+(\n|$)', transHome) is None and \
taskSpec.transHome not in ['AnalysisTransforms', None] and \
re.search('\d{4}-\d{2}-\d{2}T\d{4}$', transHome) is None and \
re.search('-\d+\.\d+\.\d+$', transHome) is None:
# cache is checked
siteListWithSW, sitesNoJsonCheck = jsonCheck.check(unified_site_list, "atlas",
transHome.split('-')[0],
transHome.split('-')[1],
taskSpec.get_sw_platform(),
False, False,
container_name=taskSpec.container_name,
only_tags_fc=taskSpec.use_only_tags_fc(),
host_cpu_spec=host_cpu_spec,
host_gpu_spec=host_gpu_spec,
log_stream=tmpLog)
sitesAuto = copy.copy(siteListWithSW)
tmpListWithSW = self.taskBufferIF.checkSitesWithRelease(sitesNoJsonCheck,
caches=transHome,
cmtConfig=taskSpec.get_sw_platform())
sitesNonAuto = copy.copy(tmpListWithSW)
siteListWithSW += tmpListWithSW
elif (transHome == '' and taskSpec.transUses is not None) or \
(re.search('-\d+\.\d+\.\d+$',transHome) is not None and
(taskSpec.transUses is None or re.search('-\d+\.\d+$', taskSpec.transUses) is None)):
siteListWithSW = []
sitesNoJsonCheck = unified_site_list
# remove Atlas-
if taskSpec.transUses is not None:
transUses = taskSpec.transUses.split('-')[-1]
else:
transUses = None
if transUses is not None:
# release is checked
tmpSiteListWithSW, sitesNoJsonCheck = jsonCheck.check(unified_site_list, "atlas",
"AtlasOffline",
transUses,
taskSpec.get_sw_platform(),
False, False,
container_name=taskSpec.container_name,
only_tags_fc=taskSpec.use_only_tags_fc(),
host_cpu_spec=host_cpu_spec,
host_gpu_spec=host_gpu_spec,
log_stream=tmpLog)
siteListWithSW += tmpSiteListWithSW
if len(transHome.split('-')) == 2:
tmpSiteListWithSW, sitesNoJsonCheck = jsonCheck.check(sitesNoJsonCheck, "atlas",
transHome.split('-')[0],
transHome.split('-')[1],
taskSpec.get_sw_platform(),
False, False,
container_name=taskSpec.container_name,
only_tags_fc=taskSpec.use_only_tags_fc(),
host_cpu_spec=host_cpu_spec,
host_gpu_spec=host_gpu_spec,
log_stream=tmpLog)
siteListWithSW += tmpSiteListWithSW
sitesAuto = copy.copy(siteListWithSW)
tmpListWithSW = []
if transUses is not None:
tmpListWithSW += self.taskBufferIF.checkSitesWithRelease(sitesNoJsonCheck,
releases=transUses,
cmtConfig=taskSpec.get_sw_platform())
tmpListWithSW += self.taskBufferIF.checkSitesWithRelease(sitesNoJsonCheck,
caches=transHome,
cmtConfig=taskSpec.get_sw_platform())
sitesNonAuto = list(set(tmpListWithSW).difference(set(sitesAuto)))
siteListWithSW += tmpListWithSW
else:
# nightlies or standalone
siteListWithCVMFS = self.taskBufferIF.checkSitesWithRelease(unified_site_list,
releases='CVMFS')
if taskSpec.get_sw_platform() in ['', None]:
# architecture is not set
siteListWithCMTCONFIG = copy.copy(unified_site_list)
else:
siteListWithCMTCONFIG = \
self.taskBufferIF.checkSitesWithRelease(unified_site_list,
cmtConfig=taskSpec.get_sw_platform(),
onlyCmtConfig=True)
if taskSpec.transHome is not None:
# CVMFS check for nightlies
siteListWithSW, sitesNoJsonCheck = jsonCheck.check(unified_site_list, "nightlies",
None, None,
taskSpec.get_sw_platform(),
True, False,
container_name=taskSpec.container_name,
only_tags_fc=taskSpec.use_only_tags_fc(),
host_cpu_spec=host_cpu_spec,
host_gpu_spec=host_gpu_spec,
log_stream=tmpLog)
sitesAuto = copy.copy(siteListWithSW)
sitesNonAuto = list((set(siteListWithCVMFS) & set(siteListWithCMTCONFIG)).difference(set(sitesAuto)))
siteListWithSW += sitesNonAuto
else:
# no CVMFS check for standalone SW
siteListWithSW, sitesNoJsonCheck = jsonCheck.check(unified_site_list, None,
None, None,
taskSpec.get_sw_platform(),
False, True,
container_name=taskSpec.container_name,
only_tags_fc=taskSpec.use_only_tags_fc(),
host_cpu_spec=host_cpu_spec,
host_gpu_spec=host_gpu_spec,
log_stream=tmpLog)
sitesAuto = copy.copy(siteListWithSW)
if host_cpu_spec or host_gpu_spec:
sitesNonAuto = []
else:
sitesNonAuto = list(set(siteListWithCMTCONFIG).difference(set(sitesAuto)))
siteListWithSW += sitesNonAuto
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
sitesAny = []
for tmpSiteName in unified_site_list:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
if tmpSiteName in siteListWithSW:
# passed
newScanSiteList.append(tmpSiteName)
elif host_cpu_spec is None and host_gpu_spec is None and tmpSiteSpec.releases == ['ANY']:
# release check is disabled or release is available
newScanSiteList.append(tmpSiteName)
sitesAny.append(tmpSiteName)
else:
# release is unavailable
tmpLog.info(' skip site=%s due to missing rel/cache %s:%s sw_platform=%s '
' cpu=%s gpu=%s criteria=-cache' % \
(tmpSiteName, taskSpec.transUses, taskSpec.transHome, taskSpec.get_sw_platform(),
str(host_cpu_spec), str(host_gpu_spec)))
sitesAuto = self.get_pseudo_sites(sitesAuto, scanSiteList)
sitesNonAuto = self.get_pseudo_sites(sitesNonAuto, scanSiteList)
sitesAny = self.get_pseudo_sites(sitesAny, scanSiteList)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info(
'{} candidates ({} with AUTO, {} without AUTO, {} with ANY) passed SW check '.format(
len(scanSiteList), len(sitesAuto), len(sitesNonAuto), len(sitesAny)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'release/cache/CPU/GPU check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for memory
origMinRamCount = inputChunk.getMaxRamCount()
if origMinRamCount not in [0, None]:
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# scale RAM by nCores
minRamCount = origMinRamCount
if not inputChunk.isMerging:
if tmpSiteSpec.coreCount not in [None, 0]:
minRamCount = origMinRamCount * tmpSiteSpec.coreCount
minRamCount = JobUtils.compensate_ram_count(minRamCount)
# site max memory requirement
site_maxmemory = 0
if tmpSiteSpec.maxrss not in [0,None]:
site_maxmemory = tmpSiteSpec.maxrss
if site_maxmemory not in [0,None] and minRamCount != 0 and minRamCount > site_maxmemory:
tmpLog.info(' skip site={0} due to site RAM shortage. site_maxmemory={1} < job_minramcount={2} criteria=-lowmemory'.format(tmpSiteName,
site_maxmemory,
minRamCount))
continue
# site min memory requirement
site_minmemory = 0
if tmpSiteSpec.minrss not in [0,None]:
site_minmemory = tmpSiteSpec.minrss
if site_minmemory not in [0,None] and minRamCount != 0 and minRamCount < site_minmemory:
tmpLog.info(' skip site={0} due to job RAM shortage. site_minmemory={1} > job_minramcount={2} criteria=-highmemory'.format(tmpSiteName,
site_minmemory,
minRamCount))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = newScanSiteList
ramUnit = taskSpec.ramUnit
if ramUnit is None:
ramUnit = 'MB'
tmpLog.info('{0} candidates passed memory check = {1} {2}'.format(len(scanSiteList),
minRamCount, ramUnit))
self.add_summary_message(oldScanSiteList, scanSiteList, 'memory check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for scratch disk
tmpMaxAtomSize = inputChunk.getMaxAtomSize()
if not inputChunk.isMerging:
tmpEffAtomSize = inputChunk.getMaxAtomSize(effectiveSize=True)
tmpOutDiskSize = taskSpec.getOutDiskSize()
tmpWorkDiskSize = taskSpec.getWorkDiskSize()
minDiskCountS = tmpOutDiskSize*tmpEffAtomSize + tmpWorkDiskSize + tmpMaxAtomSize
minDiskCountS = minDiskCountS // 1024 // 1024
maxSizePerJob = taskSpec.getMaxSizePerJob()
                if maxSizePerJob is not None:
                    maxSizePerJob //= (1024 * 1024)
# size for direct IO sites
minDiskCountR = tmpOutDiskSize*tmpEffAtomSize + tmpWorkDiskSize
minDiskCountR = minDiskCountR // 1024 // 1024
tmpLog.info('maxAtomSize={0} effectiveAtomSize={1} outDiskCount={2} workDiskSize={3}'.format(tmpMaxAtomSize,
tmpEffAtomSize,
tmpOutDiskSize,
tmpWorkDiskSize))
else:
maxSizePerJob = None
minDiskCountS = 2 * tmpMaxAtomSize // 1024 // 1024
minDiskCountR = 'NA'
tmpLog.info('minDiskCountScratch={0} minDiskCountRemote={1} nGBPerJobInMB={2}'.format(minDiskCountS,
minDiskCountR,
maxSizePerJob))
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in self.get_unified_sites(scanSiteList):
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check at the site
if tmpSiteSpec.maxwdir:
if JediCoreUtils.use_direct_io_for_job(taskSpec, tmpSiteSpec, inputChunk):
minDiskCount = minDiskCountR
if maxSizePerJob is not None and not taskSpec.useLocalIO():
tmpMinDiskCountR = tmpOutDiskSize * maxSizePerJob + tmpWorkDiskSize
tmpMinDiskCountR /= (1024 * 1024)
if tmpMinDiskCountR > minDiskCount:
minDiskCount = tmpMinDiskCountR
else:
minDiskCount = minDiskCountS
if maxSizePerJob is not None and maxSizePerJob > minDiskCount:
minDiskCount = maxSizePerJob
# get site and task corecount to scale maxwdir
if tmpSiteSpec.coreCount in [None, 0, 1]:
site_cc = 1
else:
site_cc = tmpSiteSpec.coreCount
if taskSpec.coreCount in [None, 0, 1]:
task_cc = 1
else:
task_cc = site_cc
maxwdir_scaled = tmpSiteSpec.maxwdir * task_cc / site_cc
if minDiskCount > maxwdir_scaled:
tmpLog.info(' skip site={0} due to small scratch disk={1} < {2} criteria=-disk'.format(
tmpSiteName, maxwdir_scaled, minDiskCount))
continue
newMaxwdir[tmpSiteName] = maxwdir_scaled
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed scratch disk check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'scratch disk check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for available space in SE
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in self.get_unified_sites(scanSiteList):
# check endpoint
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
scope_input, scope_output = select_scope(tmpSiteSpec, JobUtils.ANALY_PS, JobUtils.ANALY_PS)
tmpEndPoint = tmpSiteSpec.ddm_endpoints_output[scope_output].getEndPoint(tmpSiteSpec.ddm_output[scope_output])
if tmpEndPoint is not None:
# free space must be >= 200GB
diskThreshold = 200
tmpSpaceSize = 0
if tmpEndPoint['space_expired'] is not None:
tmpSpaceSize += tmpEndPoint['space_expired']
if tmpEndPoint['space_free'] is not None:
tmpSpaceSize += tmpEndPoint['space_free']
if tmpSpaceSize < diskThreshold and 'skip_RSE_check' not in tmpSiteSpec.catchall: # skip_RSE_check: exceptional bypass of RSEs without storage reporting
tmpLog.info(' skip site={0} due to disk shortage in SE {1} < {2}GB criteria=-disk'.format(tmpSiteName, tmpSpaceSize,
diskThreshold))
continue
# check if blacklisted
if tmpEndPoint['blacklisted'] == 'Y':
tmpLog.info(' skip site={0} since {1} is blacklisted in DDM criteria=-blacklist'.format(tmpSiteName, tmpSiteSpec.ddm_output[scope_output]))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed SE space check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'storage space check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for walltime
minWalltime = taskSpec.walltime
if minWalltime not in [0,None] and minWalltime > 0 and not inputChunk.isMerging:
minWalltime *= tmpEffAtomSize
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check at the site
if tmpSiteSpec.maxtime != 0 and minWalltime > tmpSiteSpec.maxtime:
tmpLog.info(' skip site={0} due to short site walltime={1}(site upper limit) < {2} criteria=-shortwalltime'.format(tmpSiteName,
tmpSiteSpec.maxtime,
minWalltime))
continue
if tmpSiteSpec.mintime != 0 and minWalltime < tmpSiteSpec.mintime:
tmpLog.info(' skip site={0} due to short job walltime={1}(site lower limit) > {2} criteria=-longwalltime'.format(tmpSiteName,
tmpSiteSpec.mintime,
minWalltime))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = newScanSiteList
tmpLog.info('{0} candidates passed walltime check ={1}{2}'.format(len(scanSiteList),minWalltime,taskSpec.walltimeUnit))
self.add_summary_message(oldScanSiteList, scanSiteList, 'walltime check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# selection for nPilot
nWNmap = self.taskBufferIF.getCurrentSiteData()
nPilotMap = {}
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
for tmpSiteName in self.get_unified_sites(scanSiteList):
# check at the site
nPilot = 0
if tmpSiteName in nWNmap:
nPilot = nWNmap[tmpSiteName]['getJob'] + nWNmap[tmpSiteName]['updateJob']
if nPilot == 0 and taskSpec.prodSourceLabel not in ['test']:
tmpLog.info(' skip site=%s due to no pilot criteria=-nopilot' % tmpSiteName)
if not self.testMode:
continue
newScanSiteList.append(tmpSiteName)
nPilotMap[tmpSiteName] = nPilot
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed pilot activity check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'pilot activity check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# check inclusion and exclusion
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
sitesForANY = []
for tmpSiteName in self.get_unified_sites(scanSiteList):
autoSite = False
# check exclusion
if AtlasBrokerUtils.isMatched(tmpSiteName,excludeList):
tmpLog.info(' skip site={0} excluded criteria=-excluded'.format(tmpSiteName))
continue
# check inclusion
if includeList is not None and not AtlasBrokerUtils.isMatched(tmpSiteName,includeList):
if 'AUTO' in includeList:
autoSite = True
else:
tmpLog.info(' skip site={0} not included criteria=-notincluded'.format(tmpSiteName))
continue
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# limited access
if tmpSiteSpec.accesscontrol == 'grouplist':
if tmpSiteSpec.sitename not in siteAccessMap or \
siteAccessMap[tmpSiteSpec.sitename] != 'approved':
tmpLog.info(' skip site={0} limited access criteria=-limitedaccess'.format(tmpSiteName))
continue
# check cloud
if taskSpec.cloud not in [None,'','any',tmpSiteSpec.cloud]:
tmpLog.info(' skip site={0} cloud mismatch criteria=-cloudmismatch'.format(tmpSiteName))
continue
if autoSite:
sitesForANY.append(tmpSiteName)
else:
newScanSiteList.append(tmpSiteName)
# use AUTO sites if no sites are included
if newScanSiteList == []:
newScanSiteList = sitesForANY
else:
for tmpSiteName in sitesForANY:
tmpLog.info(' skip site={0} not included criteria=-notincluded'.format(tmpSiteName))
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed inclusion/exclusion'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'include/exclude check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# sites already used by task
tmpSt,sitesUsedByTask = self.taskBufferIF.getSitesUsedByTask_JEDI(taskSpec.jediTaskID)
if not tmpSt:
tmpLog.error('failed to get sites which already used by task')
retVal = retTmpError
continue
sitesUsedByTask = self.get_unified_sites(sitesUsedByTask)
######################################
# calculate weight
tmpSt, jobStatPrioMap = self.taskBufferIF.getJobStatisticsByGlobalShare(taskSpec.vo)
if not tmpSt:
tmpLog.error('failed to get job statistics with priority')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
tmpSt, siteToRunRateMap = AtlasBrokerUtils.getSiteToRunRateStats(tbIF=self.taskBufferIF, vo=taskSpec.vo)
if not tmpSt:
tmpLog.error('failed to get site to-running rate')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
# check for preassigned
if sitePreAssigned:
oldScanSiteList = copy.copy(scanSiteList)
if preassignedSite not in scanSiteList and preassignedSite not in self.get_unified_sites(scanSiteList):
tmpLog.info("preassigned site {0} did not pass all tests".format(preassignedSite))
self.add_summary_message(oldScanSiteList, [], 'preassign check')
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retFatal
continue
else:
newScanSiteList = []
for tmpPseudoSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
if tmpSiteName != preassignedSite:
tmpLog.info(' skip site={0} non pre-assigned site criteria=-nonpreassigned'.format(
tmpPseudoSiteName))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed preassigned check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'preassign check')
######################################
# selection for hospital
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
hasNormalSite = False
for tmpSiteName in self.get_unified_sites(scanSiteList):
if not tmpSiteName.endswith('_HOSPITAL'):
hasNormalSite = True
break
if hasNormalSite:
for tmpSiteName in self.get_unified_sites(scanSiteList):
# remove hospital
if tmpSiteName.endswith('_HOSPITAL'):
tmpLog.info(' skip site=%s due to hospital queue criteria=-hospital' % tmpSiteName)
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed hospital check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'hospital check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# cap with resource type
if not sitePreAssigned:
# count jobs per resource type
tmpRet, tmpStatMap = self.taskBufferIF.getJobStatisticsByResourceTypeSite(workQueue)
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
RT_Cap = 2
for tmpSiteName in self.get_unified_sites(scanSiteList):
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
tmpUnifiedName = tmpSiteSpec.get_unified_name()
if tmpUnifiedName in tmpStatMap and taskSpec.resource_type in tmpStatMap[tmpUnifiedName]:
tmpSiteStatMap = tmpStatMap[tmpUnifiedName][taskSpec.resource_type]
tmpRTrunning = tmpSiteStatMap.get('running', 0)
tmpRTqueue = tmpSiteStatMap.get('defined', 0)
tmpRTqueue += tmpSiteStatMap.get('assigned', 0)
tmpRTqueue += tmpSiteStatMap.get('activated', 0)
tmpRTqueue += tmpSiteStatMap.get('starting', 0)
if tmpRTqueue > max(20, tmpRTrunning * RT_Cap):
tmpMsg = ' skip site={0} '.format(tmpSiteName)
tmpMsg += 'since nQueue/max(20,nRun) with gshare+resource_type is '
tmpMsg += '{0}/max(20,{1}) > {2} '.format(tmpRTqueue, tmpRTrunning, RT_Cap)
                            tmpMsg += 'criteria=-cap_rt'
                            tmpLog.info(tmpMsg)
                            continue
newScanSiteList.append(tmpSiteName)
scanSiteList = self.get_pseudo_sites(newScanSiteList, scanSiteList)
tmpLog.info('{0} candidates passed for cap with gshare+resource_type check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'cap with gshare+resource_type check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
######################################
# selection for un-overloaded sites
if not inputChunk.isMerging:
newScanSiteList = []
oldScanSiteList = copy.copy(scanSiteList)
overloadedNonVP = []
msgList = []
msgListVP = []
minQueue = self.taskBufferIF.getConfigValue('anal_jobbroker', 'OVERLOAD_MIN_QUEUE', 'jedi', taskSpec.vo)
if minQueue is None:
minQueue = 20
ratioOffset = self.taskBufferIF.getConfigValue('anal_jobbroker', 'OVERLOAD_RATIO_OFFSET', 'jedi',
taskSpec.vo)
if ratioOffset is None:
ratioOffset = 1.2
grandRatio = AtlasBrokerUtils.get_total_nq_nr_ratio(jobStatPrioMap, taskSpec.gshare)
tmpLog.info('grand nQueue/nRunning ratio : {0}'.format(grandRatio))
tmpLog.info('sites with non-VP data : {0}'.format(','.join(scanSiteWoVP)))
for tmpPseudoSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
# get nQueue and nRunning
nRunning = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'running', workQueue_tag=taskSpec.gshare)
nQueue = 0
for jobStatus in ['defined', 'assigned', 'activated', 'starting']:
nQueue += AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, jobStatus, workQueue_tag=taskSpec.gshare)
# skip if overloaded
if nQueue > minQueue and \
(nRunning == 0 or float(nQueue) / float(nRunning) > grandRatio * ratioOffset):
tmpMsg = ' skip site={0} '.format(tmpPseudoSiteName)
tmpMsg += 'nQueue>minQueue({0}) and '.format(minQueue)
if nRunning == 0:
tmpMsg += 'nRunning=0 '
problematic_sites_dict.setdefault(tmpSiteName, set())
problematic_sites_dict[tmpSiteName].add('nQueue({0})>minQueue({1}) and nRunning=0'.format(nQueue, minQueue))
else:
tmpMsg += 'nQueue({0})/nRunning({1}) > grandRatio({2:.2f})*offset({3}) '.format(nQueue,
nRunning,
grandRatio,
ratioOffset)
if tmpSiteName in scanSiteWoVP or checkDataLocality is False or inputChunk.getDatasets() == []:
tmpMsg += 'criteria=-overloaded'
overloadedNonVP.append(tmpPseudoSiteName)
msgListVP.append(tmpMsg)
else:
tmpMsg += 'and VP criteria=-overloaded_vp'
msgList.append(tmpMsg)
else:
newScanSiteList.append(tmpPseudoSiteName)
if len(newScanSiteList) > 0:
scanSiteList = newScanSiteList
for tmpMsg in msgList+msgListVP:
tmpLog.info(tmpMsg)
else:
scanSiteList = overloadedNonVP
for tmpMsg in msgList:
tmpLog.info(tmpMsg)
tmpLog.info('{0} candidates passed overload check'.format(len(scanSiteList)))
self.add_summary_message(oldScanSiteList, scanSiteList, 'overload check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
retVal = retTmpError
continue
######################################
# skip sites where the user queues too much
user_name = self.taskBufferIF.cleanUserID(taskSpec.userName)
tmpSt, jobsStatsPerUser = AtlasBrokerUtils.getUsersJobsStats( tbIF=self.taskBufferIF,
vo=taskSpec.vo,
prod_source_label=taskSpec.prodSourceLabel,
cache_lifetime=60)
if not tmpSt:
tmpLog.error('failed to get users jobs statistics')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
elif not inputChunk.isMerging:
# parameters
base_queue_length_per_pq = self.taskBufferIF.getConfigValue(
'anal_jobbroker', 'BASE_QUEUE_LENGTH_PER_PQ', 'jedi', taskSpec.vo)
if base_queue_length_per_pq is None:
base_queue_length_per_pq = 100
base_expected_wait_hour_on_pq = self.taskBufferIF.getConfigValue(
'anal_jobbroker', 'BASE_EXPECTED_WAIT_HOUR_ON_PQ', 'jedi', taskSpec.vo)
if base_expected_wait_hour_on_pq is None:
base_expected_wait_hour_on_pq = 8
base_default_queue_length_per_pq_user = self.taskBufferIF.getConfigValue(
'anal_jobbroker', 'BASE_DEFAULT_QUEUE_LENGTH_PER_PQ_USER', 'jedi', taskSpec.vo)
if base_default_queue_length_per_pq_user is None:
base_default_queue_length_per_pq_user = 5
base_queue_ratio_on_pq = self.taskBufferIF.getConfigValue(
'anal_jobbroker', 'BASE_QUEUE_RATIO_ON_PQ', 'jedi', taskSpec.vo)
if base_queue_ratio_on_pq is None:
base_queue_ratio_on_pq = 0.05
static_max_queue_running_ratio = self.taskBufferIF.getConfigValue(
'anal_jobbroker', 'STATIC_MAX_QUEUE_RUNNING_RATIO', 'jedi', taskSpec.vo)
if static_max_queue_running_ratio is None:
static_max_queue_running_ratio = 2.0
max_expected_wait_hour = self.taskBufferIF.getConfigValue(
'anal_jobbroker', 'MAX_EXPECTED_WAIT_HOUR', 'jedi', taskSpec.vo)
if max_expected_wait_hour is None:
max_expected_wait_hour = 12.0
# loop over sites
for tmpPseudoSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
# get info about site
nRunning_pq_total = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'running')
nRunning_pq_in_gshare = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'running', workQueue_tag=taskSpec.gshare)
nQueue_pq_in_gshare = 0
for jobStatus in ['defined', 'assigned', 'activated', 'starting']:
nQueue_pq_in_gshare += AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, jobStatus, workQueue_tag=taskSpec.gshare)
# get to-running-rate
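                    # (the rate from the DB is per site; scale it by this gshare's share of the site's running jobs)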
try:
site_to_running_rate = siteToRunRateMap[tmpSiteName]
if isinstance(site_to_running_rate, dict):
site_to_running_rate = sum(site_to_running_rate.values())
except KeyError:
site_to_running_rate = 0
finally:
to_running_rate = nRunning_pq_in_gshare*site_to_running_rate/nRunning_pq_total if nRunning_pq_total > 0 else 0
# get conditions of the site whether to throttle
if nQueue_pq_in_gshare < base_queue_length_per_pq:
# not throttle since overall queue length of the site is not large enough
tmpLog.debug('not throttle on {0} since nQ({1}) < base queue length ({2})'.format(
tmpSiteName, nQueue_pq_in_gshare, base_queue_length_per_pq))
continue
allowed_queue_length_from_wait_time = base_expected_wait_hour_on_pq*to_running_rate
if nQueue_pq_in_gshare < allowed_queue_length_from_wait_time:
                        # not throttle since overall waiting time of the site is not long enough
tmpLog.debug('not throttle on {0} since nQ({1}) < {2:.3f} = toRunningRate({3:.3f} /hr) * base wait time ({4} hr)'.format(
tmpSiteName, nQueue_pq_in_gshare, allowed_queue_length_from_wait_time,
to_running_rate, base_expected_wait_hour_on_pq))
continue
# get user jobs stats under the gshare
try:
user_jobs_stats_map = jobsStatsPerUser[tmpSiteName][taskSpec.gshare][user_name]
except KeyError:
continue
else:
nQ_pq_user = user_jobs_stats_map['nQueue']
nR_pq_user = user_jobs_stats_map['nRunning']
nUsers_pq = len(jobsStatsPerUser[tmpSiteName][taskSpec.gshare])
try:
nR_pq = jobsStatsPerUser[tmpSiteName][taskSpec.gshare]['_total']['nRunning']
except KeyError:
nR_pq = nRunning_pq_in_gshare
# evaluate max nQueue per PQ
nQ_pq_limit_map = {
'base_limit': base_queue_length_per_pq,
'static_limit': static_max_queue_running_ratio*nR_pq,
'dynamic_limit': max_expected_wait_hour*to_running_rate,
}
max_nQ_pq = max(nQ_pq_limit_map.values())
# description for max nQueue per PQ
description_of_max_nQ_pq = 'max_nQ_pq({maximum:.3f}) '.format(maximum=max_nQ_pq)
for k, v in nQ_pq_limit_map.items():
if v == max_nQ_pq:
if k in ['base_limit']:
description_of_max_nQ_pq += '= {key} = BASE_QUEUE_LENGTH_PER_PQ({value})'.format(
key=k, value=base_queue_length_per_pq)
elif k in ['static_limit']:
description_of_max_nQ_pq += '= {key} = STATIC_MAX_QUEUE_RUNNING_RATIO({value:.3f}) * nR_pq({nR_pq})'.format(
key=k, value=static_max_queue_running_ratio, nR_pq=nR_pq)
elif k in ['dynamic_limit']:
description_of_max_nQ_pq += '= {key} = MAX_EXPECTED_WAIT_HOUR({value:.3f} hr) * toRunningRate_pq({trr:.3f} /hr)'.format(
key=k, value=max_expected_wait_hour, trr=to_running_rate)
break
# evaluate fraction per user
user_fraction_map = {
'equal_distr': 1/nUsers_pq,
'prop_to_nR': nR_pq_user/nR_pq if nR_pq > 0 else 0,
}
max_user_fraction = max(user_fraction_map.values())
# description for max fraction per user
description_of_max_user_fraction = 'max_user_fraction({maximum:.3f}) '.format(maximum=max_user_fraction)
for k, v in user_fraction_map.items():
if v == max_user_fraction:
if k in ['equal_distr']:
description_of_max_user_fraction += '= {key} = 1 / nUsers_pq({nU})'.format(
key=k, nU=nUsers_pq)
elif k in ['prop_to_nR']:
description_of_max_user_fraction += '= {key} = nR_pq_user({nR_pq_user}) / nR_pq({nR_pq})'.format(
key=k, nR_pq_user=nR_pq_user, nR_pq=nR_pq)
break
# evaluate max nQueue per PQ per user
nQ_pq_user_limit_map = {
'constant_base_user_limit': base_default_queue_length_per_pq_user,
'ratio_base_user_limit': base_queue_ratio_on_pq*nR_pq,
'dynamic_user_limit': max_nQ_pq*max_user_fraction,
}
max_nQ_pq_user = max(nQ_pq_user_limit_map.values())
# description for max fraction per user
description_of_max_nQ_pq_user = 'max_nQ_pq_user({maximum:.3f}) '.format(maximum=max_nQ_pq_user)
for k, v in nQ_pq_user_limit_map.items():
if v == max_nQ_pq_user:
if k in ['constant_base_user_limit']:
description_of_max_nQ_pq_user += '= {key} = BASE_DEFAULT_QUEUE_LENGTH_PER_PQ_USER({value})'.format(
key=k, value=base_default_queue_length_per_pq_user)
elif k in ['ratio_base_user_limit']:
description_of_max_nQ_pq_user += '= {key} = BASE_QUEUE_RATIO_ON_PQ({value:.3f}) * nR_pq({nR_pq})'.format(
key=k, value=base_queue_ratio_on_pq, nR_pq=nR_pq)
elif k in ['dynamic_user_limit']:
description_of_max_nQ_pq_user += '= {key} = max_nQ_pq({max_nQ_pq:.3f}) * max_user_fraction({max_user_fraction:.3f})'.format(
key=k, max_nQ_pq=max_nQ_pq, max_user_fraction=max_user_fraction)
description_of_max_nQ_pq_user += ' , where {0} , and {1}'.format(description_of_max_nQ_pq, description_of_max_user_fraction)
break
# check
if nQ_pq_user > max_nQ_pq_user:
tmpMsg = ' consider {0} unsuitable for the user due to long queue of the user: '.format(tmpSiteName)
tmpMsg += 'nQ_pq_user({0}) > {1} '.format(nQ_pq_user, description_of_max_nQ_pq_user)
# view as problematic site in order to throttle
problematic_sites_dict.setdefault(tmpSiteName, set())
problematic_sites_dict[tmpSiteName].add(tmpMsg)
############
# loop end
if len(scanSiteList) > 0:
retVal = None
break
# failed
if retVal is not None:
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retVal
# get list of available files
availableFileMap = {}
for datasetSpec in inputChunk.getDatasets():
try:
# get list of site to be scanned
tmpLog.debug('getting the list of available files for {0}'.format(datasetSpec.datasetName))
fileScanSiteList = []
for tmpPseudoSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
if tmpSiteName in fileScanSiteList:
continue
fileScanSiteList.append(tmpSiteName)
if tmpSiteName in remoteSourceList and datasetSpec.datasetName in remoteSourceList[tmpSiteName]:
for tmpRemoteSite in remoteSourceList[tmpSiteName][datasetSpec.datasetName]:
if tmpRemoteSite not in fileScanSiteList:
fileScanSiteList.append(tmpRemoteSite)
# mapping between sites and input storage endpoints
siteStorageEP = AtlasBrokerUtils.getSiteInputStorageEndpointMap(fileScanSiteList, self.siteMapper,
JobUtils.ANALY_PS, JobUtils.ANALY_PS)
# disable file lookup for merge jobs
if inputChunk.isMerging:
checkCompleteness = False
else:
checkCompleteness = True
if not datasetSpec.isMaster():
useCompleteOnly = True
else:
useCompleteOnly = False
# get available files per site/endpoint
tmpAvFileMap = self.ddmIF.getAvailableFiles(datasetSpec,
siteStorageEP,
self.siteMapper,
check_completeness=checkCompleteness,
use_vp=useVP,
file_scan_in_container=False,
complete_only=useCompleteOnly)
if tmpAvFileMap is None:
raise Interaction.JEDITemporaryError('ddmIF.getAvailableFiles failed')
availableFileMap[datasetSpec.datasetName] = tmpAvFileMap
except Exception:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error('failed to get available files with %s %s' % (errtype.__name__,errvalue))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
# make data weight
totalSize = 0
totalNumFiles = 0
totalDiskSizeMap = dict()
totalTapeSizeMap = dict()
for datasetSpec in inputChunk.getDatasets():
totalNumFiles += len(datasetSpec.Files)
for fileSpec in datasetSpec.Files:
totalSize += fileSpec.fsize
if datasetSpec.datasetName in availableFileMap:
for tmpSiteName, tmpAvFileMap in iteritems(availableFileMap[datasetSpec.datasetName]):
totalDiskSizeMap.setdefault(tmpSiteName, 0)
totalTapeSizeMap.setdefault(tmpSiteName, 0)
for fileSpec in tmpAvFileMap['localdisk']:
totalDiskSizeMap[tmpSiteName] += fileSpec.fsize
for fileSpec in tmpAvFileMap['localtape']:
totalTapeSizeMap[tmpSiteName] += fileSpec.fsize
totalSize //= (1024 * 1024 * 1024)
tmpLog.info('totalInputSize={0} GB'.format(totalSize))
for tmpSiteName in totalDiskSizeMap.keys():
totalDiskSizeMap[tmpSiteName] //= (1024 * 1024 *1024)
for tmpSiteName in totalTapeSizeMap.keys():
totalTapeSizeMap[tmpSiteName] //= (1024 * 1024 *1024)
######################################
# final procedure
tmpLog.info('{0} candidates for final check'.format(len(scanSiteList)))
weightMap = {}
weightStr = {}
candidateSpecList = []
preSiteCandidateSpec = None
basic_weight_comparison_map = {}
workerStat = self.taskBufferIF.ups_load_worker_stats()
minBadJobsToSkipPQ = self.taskBufferIF.getConfigValue('anal_jobbroker', 'MIN_BAD_JOBS_TO_SKIP_PQ',
'jedi', taskSpec.vo)
if minBadJobsToSkipPQ is None:
minBadJobsToSkipPQ = 5
for tmpPseudoSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
nRunning = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'running', workQueue_tag=taskSpec.gshare)
nDefined = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'defined', workQueue_tag=taskSpec.gshare)
nAssigned = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'assigned', workQueue_tag=taskSpec.gshare)
nActivated = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'activated', workQueue_tag=taskSpec.gshare)
nStarting = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'starting', workQueue_tag=taskSpec.gshare)
# get num workers
nWorkers = 0
nWorkersCutoff = 20
if tmpSiteName in workerStat:
for tmpHarvesterID, tmpLabelStat in iteritems(workerStat[tmpSiteName]):
for tmpHarvesterID, tmpResStat in iteritems(tmpLabelStat):
for tmpResType, tmpCounts in iteritems(tmpResStat):
for tmpStatus, tmpNum in iteritems(tmpCounts):
if tmpStatus in ['running', 'submitted']:
nWorkers += tmpNum
# cap
nWorkers = min(nWorkersCutoff, nWorkers)
# use nWorkers to bootstrap
if tmpSiteName in nPilotMap and nPilotMap[tmpSiteName] > 0 and nRunning < nWorkersCutoff \
and nWorkers > nRunning:
tmpLog.debug('using nWorkers={} as nRunning at {} since original nRunning={} is low'.format(
nWorkers, tmpPseudoSiteName, nRunning))
nRunning = nWorkers
# take into account the number of standby jobs
numStandby = tmpSiteSpec.getNumStandby(taskSpec.gshare, taskSpec.resource_type)
if numStandby is None:
pass
elif numStandby == 0:
# use the number of starting jobs as the number of standby jobs
nRunning = nStarting + nRunning
tmpLog.debug('using dynamic workload provisioning at {0} to set nRunning={1}'.format(tmpPseudoSiteName,
nRunning))
else:
# the number of standby jobs is defined
nRunning = max(int(numStandby / tmpSiteSpec.coreCount), nRunning)
tmpLog.debug('using static workload provisioning at {0} with nStandby={1} to set nRunning={2}'.format(
tmpPseudoSiteName, numStandby, nRunning))
nFailed = 0
nClosed = 0
nFinished = 0
if tmpSiteName in failureCounts:
if 'failed' in failureCounts[tmpSiteName]:
nFailed = failureCounts[tmpSiteName]['failed']
if 'closed' in failureCounts[tmpSiteName]:
nClosed = failureCounts[tmpSiteName]['closed']
if 'finished' in failureCounts[tmpSiteName]:
nFinished = failureCounts[tmpSiteName]['finished']
# problematic sites with too many failed and closed jobs
if not inputChunk.isMerging and (nFailed + nClosed) > max(2*nFinished, minBadJobsToSkipPQ):
problematic_sites_dict.setdefault(tmpSiteName, set())
problematic_sites_dict[tmpSiteName].add('too many failed or closed jobs for last 6h')
# calculate weight
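                # basic weight is (nRunning + 1) / (nQueued + 1) within this gshare:
                # sites running many jobs with few queued get the largest weight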
orig_basic_weight = float(nRunning + 1) / float(nActivated + nAssigned + nDefined + nStarting + 1)
weight = orig_basic_weight
try:
site_to_running_rate = siteToRunRateMap[tmpSiteName]
if isinstance(site_to_running_rate, dict):
site_to_running_rate = sum(site_to_running_rate.values())
except KeyError:
to_running_rate_str = '0(unknown)'
to_running_rate = 0
else:
site_n_running = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'running')
to_running_rate = nRunning*site_to_running_rate/site_n_running if site_n_running > 0 else 0
to_running_rate_str = '{0:.3f}'.format(to_running_rate)
nThrottled = 0
if tmpSiteName in remoteSourceList:
nThrottled = AtlasBrokerUtils.getNumJobs(jobStatPrioMap, tmpSiteName, 'throttled', workQueue_tag=taskSpec.gshare)
weight /= float(nThrottled + 1)
# normalize weights by taking data availability into account
diskNorm = 10
tapeNorm = 1000
localSize = totalSize
if checkDataLocality and not useUnionLocality:
tmpDataWeight = 1
if tmpSiteName in dataWeight:
weight *= dataWeight[tmpSiteName]
tmpDataWeight = dataWeight[tmpSiteName]
else:
tmpDataWeight = 1
if totalSize > 0:
if tmpSiteName in totalDiskSizeMap:
tmpDataWeight += (totalDiskSizeMap[tmpSiteName] / diskNorm)
localSize = totalDiskSizeMap[tmpSiteName]
elif tmpSiteName in totalTapeSizeMap:
tmpDataWeight += (totalTapeSizeMap[tmpSiteName] / tapeNorm)
localSize = totalTapeSizeMap[tmpSiteName]
weight *= tmpDataWeight
# make candidate
siteCandidateSpec = SiteCandidate(tmpPseudoSiteName, tmpSiteName)
# preassigned
if sitePreAssigned and tmpSiteName == preassignedSite:
preSiteCandidateSpec = siteCandidateSpec
# override attributes
siteCandidateSpec.override_attribute('maxwdir', newMaxwdir.get(tmpSiteName))
                # available site; take into account the new basic weight
basic_weight_comparison_map[tmpSiteName] = {}
basic_weight_comparison_map[tmpSiteName]['orig'] = orig_basic_weight
basic_weight_comparison_map[tmpSiteName]['trr'] = to_running_rate
basic_weight_comparison_map[tmpSiteName]['nq'] = (nActivated + nAssigned + nDefined + nStarting)
basic_weight_comparison_map[tmpSiteName]['nr'] = nRunning
# set weight
siteCandidateSpec.weight = weight
tmpStr = 'weight={0:.3f} nRunning={1} nDefined={2} nActivated={3} nStarting={4} nAssigned={5} '.format(weight,
nRunning,
nDefined,
nActivated,
nStarting,
nAssigned)
tmpStr += 'nFailed={0} nClosed={1} nFinished={2} dataW={3} '.format(nFailed,
nClosed,
nFinished,
tmpDataWeight)
tmpStr += 'totalInGB={0} localInGB={1} nFiles={2} '.format(totalSize, localSize, totalNumFiles)
tmpStr += 'toRunningRate={0} '.format(to_running_rate_str)
weightStr[tmpPseudoSiteName] = tmpStr
# append
if tmpSiteName in sitesUsedByTask:
candidateSpecList.append(siteCandidateSpec)
else:
if weight not in weightMap:
weightMap[weight] = []
weightMap[weight].append(siteCandidateSpec)
## compute new basic weight
try:
weight_comparison_avail_sites = set(basic_weight_comparison_map.keys())
trr_sum = 0
nq_sum = 0
n_avail_sites = len(basic_weight_comparison_map)
for vv in basic_weight_comparison_map.values():
trr_sum += vv['trr']
nq_sum += vv['nq']
if n_avail_sites == 0:
tmpLog.debug('WEIGHT-COMPAR: zero available sites, skip')
elif trr_sum == 0:
tmpLog.debug('WEIGHT-COMPAR: zero sum of to-running-rate, skip')
else:
_found_weights = False
while not _found_weights:
trr_sum_avail = 0
nq_sum_avail = 0
n_avail_sites = len(weight_comparison_avail_sites)
if n_avail_sites == 0:
break
for site in weight_comparison_avail_sites:
vv = basic_weight_comparison_map[site]
trr_sum_avail += vv['trr']
nq_sum_avail += vv['nq']
if trr_sum_avail == 0:
break
_found_weights = True
for site in list(weight_comparison_avail_sites):
vv = basic_weight_comparison_map[site]
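                            # roughly: give each site a slice of the overall queue target in
                            # proportion to its to-running rate, minus what it already has queued;
                            # negative results are zeroed and the remaining weights are recomputed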
new_basic_weight = (vv['trr']/trr_sum_avail)*(25 + nq_sum_avail - n_avail_sites/2.0) - vv['nq'] + 1/2.0
if new_basic_weight < 0:
vv['new'] = 0
weight_comparison_avail_sites.discard(site)
_found_weights = False
else:
vv['new'] = new_basic_weight
orig_sum = 0
new_sum = 0
for vv in basic_weight_comparison_map.values():
orig_sum += vv['orig']
new_sum += vv['new']
for site in basic_weight_comparison_map:
vv = basic_weight_comparison_map[site]
if vv['nr'] == 0:
trr_over_r = None
else:
trr_over_r = vv['trr']/vv['nr']
vv['trr_over_r'] = '{:6.3f}'.format(trr_over_r) if trr_over_r is not None else 'None'
if orig_sum == 0:
normalized_orig = 0
else:
normalized_orig = vv['orig']/orig_sum
vv['normalized_orig'] = normalized_orig
if new_sum == 0:
normalized_new = 0
else:
normalized_new = vv['new']/new_sum
vv['normalized_new'] = normalized_new
prt_str_list = []
prt_str_temp = (' '
' {site:>24} |'
' {nq:>6} |'
' {nr:>6} |'
' {trr:9.3f} |'
' {trr_over_r} |'
' {orig:9.3f} |'
' {new:9.3f} |'
' {normalized_orig:6.1%} |'
' {normalized_new:6.1%} |')
prt_str_title = ( ' '
' {site:>24} |'
' {nq:>6} |'
' {nr:>6} |'
' {trr:>9} |'
' {trr_over_r:>6} |'
' {orig:>9} |'
' {new:>9} |'
' {normalized_orig:>6} |'
' {normalized_new:>6} |'
).format(
site='Site',
nq='Q',
nr='R',
trr='TRR',
trr_over_r='TRR/R',
orig='Wb_orig',
new='Wb_new',
normalized_orig='orig_%',
normalized_new='new_%')
prt_str_list.append(prt_str_title)
for site in sorted(basic_weight_comparison_map):
vv = basic_weight_comparison_map[site]
prt_str = prt_str_temp.format(site=site, **vv)
prt_str_list.append(prt_str)
tmpLog.debug('WEIGHT-COMPAR: for gshare={0} got \n{1}'.format(taskSpec.gshare, '\n'.join(prt_str_list)))
except Exception as e:
tmpLog.error('{0} {1}'.format(e.__class__.__name__, e))
##
oldScanSiteList = copy.copy(scanSiteList)
# sort candidates by weights
weightList = list(weightMap.keys())
weightList.sort()
weightList.reverse()
for weightVal in weightList:
sitesWithWeight = weightMap[weightVal]
random.shuffle(sitesWithWeight)
candidateSpecList += sitesWithWeight
# limit the number of sites. use all sites for distributed datasets
if not hasDDS:
maxNumSites = 10
else:
maxNumSites = None
# remove problematic sites
oldScanSiteList = copy.copy(scanSiteList)
candidateSpecList = AtlasBrokerUtils.skipProblematicSites(candidateSpecList,
set(problematic_sites_dict),
sitesUsedByTask,
preSiteCandidateSpec,
maxNumSites,
timeWindowForFC,
tmpLog)
# append preassigned
if sitePreAssigned and preSiteCandidateSpec is not None and preSiteCandidateSpec not in candidateSpecList:
candidateSpecList.append(preSiteCandidateSpec)
# collect site names
scanSiteList = []
for siteCandidateSpec in candidateSpecList:
scanSiteList.append(siteCandidateSpec.siteName)
# append candidates
newScanSiteList = []
msgList = []
for siteCandidateSpec in candidateSpecList:
tmpPseudoSiteName = siteCandidateSpec.siteName
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
# preassigned
if sitePreAssigned and tmpSiteName != preassignedSite:
tmpLog.info(' skip site={0} non pre-assigned site criteria=-nonpreassigned'.format(tmpPseudoSiteName))
try:
del weightStr[tmpPseudoSiteName]
except Exception:
pass
continue
# set available files
if inputChunk.getDatasets() == [] or (not checkDataLocality and not tmpSiteSpec.use_only_local_data()):
isAvailable = True
else:
isAvailable = False
for tmpDatasetName,availableFiles in iteritems(availableFileMap):
tmpDatasetSpec = inputChunk.getDatasetWithName(tmpDatasetName)
# check remote files
if tmpSiteName in remoteSourceList and tmpDatasetName in remoteSourceList[tmpSiteName] \
and not tmpSiteSpec.use_only_local_data():
for tmpRemoteSite in remoteSourceList[tmpSiteName][tmpDatasetName]:
if tmpRemoteSite in availableFiles and \
len(tmpDatasetSpec.Files) <= len(availableFiles[tmpRemoteSite]['localdisk']):
# use only remote disk files
siteCandidateSpec.add_remote_files(availableFiles[tmpRemoteSite]['localdisk'])
# set remote site and access protocol
siteCandidateSpec.remoteProtocol = allowedRemoteProtocol
siteCandidateSpec.remoteSource = tmpRemoteSite
isAvailable = True
break
# local files
if tmpSiteName in availableFiles:
if len(tmpDatasetSpec.Files) <= len(availableFiles[tmpSiteName]['localdisk']) or \
len(tmpDatasetSpec.Files) <= len(availableFiles[tmpSiteName]['cache']) or \
len(tmpDatasetSpec.Files) <= len(availableFiles[tmpSiteName]['localtape']) or \
(tmpDatasetSpec.isDistributed() and len(availableFiles[tmpSiteName]['all']) > 0) or \
((checkDataLocality is False or useUnionLocality) and not tmpSiteSpec.use_only_local_data()):
siteCandidateSpec.add_local_disk_files(availableFiles[tmpSiteName]['localdisk'])
# add cached files to local list since cached files go to pending when reassigned
siteCandidateSpec.add_local_disk_files(availableFiles[tmpSiteName]['cache'])
siteCandidateSpec.add_local_tape_files(availableFiles[tmpSiteName]['localtape'])
siteCandidateSpec.add_cache_files(availableFiles[tmpSiteName]['cache'])
siteCandidateSpec.add_remote_files(availableFiles[tmpSiteName]['remote'])
siteCandidateSpec.addAvailableFiles(availableFiles[tmpSiteName]['all'])
isAvailable = True
else:
tmpMsg = '{0} is incomplete at {1} : nFiles={2} nLocal={3} nCached={4} nTape={5}'
tmpLog.debug(tmpMsg.format(tmpDatasetName,
tmpPseudoSiteName,
len(tmpDatasetSpec.Files),
len(availableFiles[tmpSiteName]['localdisk']),
len(availableFiles[tmpSiteName]['cache']),
len(availableFiles[tmpSiteName]['localtape']),
))
if not isAvailable:
break
# append
if not isAvailable:
tmpLog.info(' skip site={0} file unavailable criteria=-fileunavailable'.format(siteCandidateSpec.siteName))
try:
del weightStr[siteCandidateSpec.siteName]
except Exception:
pass
continue
inputChunk.addSiteCandidate(siteCandidateSpec)
newScanSiteList.append(siteCandidateSpec.siteName)
tmpMsg = ' use site={0} with {1} nLocalDisk={2} nLocalTape={3} nCache={4} nRemote={5} criteria=+use'.format(siteCandidateSpec.siteName,
weightStr[siteCandidateSpec.siteName],
len(siteCandidateSpec.localDiskFiles),
len(siteCandidateSpec.localTapeFiles),
len(siteCandidateSpec.cacheFiles),
len(siteCandidateSpec.remoteFiles))
msgList.append(tmpMsg)
del weightStr[siteCandidateSpec.siteName]
# dump
for tmpPseudoSiteName in oldScanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpPseudoSiteName)
tmpSiteName = tmpSiteSpec.get_unified_name()
tmpWeightStr = None
if tmpSiteName in weightStr:
tmpWeightStr = weightStr[tmpSiteName]
elif tmpPseudoSiteName in weightStr:
tmpWeightStr = weightStr[tmpPseudoSiteName]
if tmpWeightStr is not None:
if tmpSiteName in problematic_sites_dict:
bad_reasons = ' ; '.join(list(problematic_sites_dict[tmpSiteName]))
tmpMsg = ' skip site={0} {1} ; with {2} criteria=-badsite'.format(tmpPseudoSiteName, bad_reasons,
tmpWeightStr)
else:
tmpMsg = (' skip site={0} due to low weight and not-used by old jobs '
'with {1} criteria=-lowweight').format(tmpPseudoSiteName, tmpWeightStr)
tmpLog.info(tmpMsg)
for tmpMsg in msgList:
tmpLog.info(tmpMsg)
scanSiteList = newScanSiteList
self.add_summary_message(oldScanSiteList, scanSiteList, 'final check')
if not scanSiteList:
self.dump_summary(tmpLog)
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
self.dump_summary(tmpLog, scanSiteList)
# send info to logger
self.sendLogMessage(tmpLog)
# return
tmpLog.debug('done')
return self.SC_SUCCEEDED,inputChunk
|
apache-2.0
|
endolith/scikit-image
|
doc/examples/segmentation/plot_random_walker_segmentation.py
|
13
|
2068
|
"""
==========================
Random walker segmentation
==========================
The random walker algorithm [1]_ determines the segmentation of an image from
a set of markers labeling several phases (2 or more). An anisotropic diffusion
equation is solved with tracers initiated at the markers' position. The local
diffusivity coefficient is greater if neighboring pixels have similar values,
so that diffusion is difficult across high gradients. The label of each unknown
pixel is attributed to the label of the known marker that has the highest
probability to be reached first during this diffusion process.
In this example, two phases are clearly visible, but the data are too
noisy to perform the segmentation from the histogram only. We determine
markers of the two phases from the extreme tails of the histogram of gray
values, and use the random walker for the segmentation.
.. [1] *Random walks for image segmentation*, Leo Grady, IEEE Trans. Pattern
Anal. Mach. Intell. 2006 Nov; 28(11):1768-83
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import random_walker
from skimage.data import binary_blobs
import skimage
# Generate noisy synthetic data
data = skimage.img_as_float(binary_blobs(length=128, seed=1))
data += 0.35 * np.random.randn(*data.shape)
markers = np.zeros(data.shape, dtype=np.uint)
markers[data < -0.3] = 1
markers[data > 1.3] = 2
# Run random walker algorithm
labels = random_walker(data, markers, beta=10, mode='bf')
# Plot results
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 3.2),
sharex=True, sharey=True)
ax1.imshow(data, cmap='gray', interpolation='nearest')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax1.set_title('Noisy data')
ax2.imshow(markers, cmap='hot', interpolation='nearest')
ax2.axis('off')
ax2.set_adjustable('box-forced')
ax2.set_title('Markers')
ax3.imshow(labels, cmap='gray', interpolation='nearest')
ax3.axis('off')
ax3.set_adjustable('box-forced')
ax3.set_title('Segmentation')
fig.tight_layout()
plt.show()
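# Note on solver choice (a sketch, not part of the example above): for larger images
# the brute-force solver ('bf') becomes expensive, and the conjugate-gradient solver
# is usually preferred, e.g.
# labels_cg = random_walker(data, markers, beta=10, mode='cg')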
|
bsd-3-clause
|
HKUST-SING/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
|
8
|
3794
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn # pylint: disable=unused-import
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
                pd.DataFrame):  # note: a pandas.Series is not a DataFrame, so it falls through to the else branch
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
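# Illustrative usage sketch (assumption: pandas is available; the column names below
# are arbitrary and only for demonstration).
if __name__ == '__main__' and HAS_PANDAS:
  _example_df = pd.DataFrame({'x': [1, 2, 3], 'y': [0.5, 1.5, 2.5]})
  # numeric columns are cast to a float ndarray
  print(extract_pandas_data(_example_df))
  # a single-column DataFrame of labels comes back as an ndarray
  print(extract_pandas_labels(_example_df[['y']]))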
|
apache-2.0
|
samueljackson92/NDImage
|
ndimage/controllers/mpl_canvas_controller.py
|
1
|
1756
|
import numpy as np
import scipy.spatial as spatial
from matplotlib.widgets import LassoSelector
from matplotlib.path import Path
class MplCanvasLassoSelector(object):
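    """Attach a LassoSelector to the canvas; rows of the parent's projected
    DataFrame whose points fall inside the drawn path are selected in the
    parent view and highlighted on the canvas."""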
def __init__(self, fig_canvas, parent):
self._parent = parent
self._canvas = fig_canvas
self._lasso = LassoSelector(self._canvas.axes,
onselect=self.onselect)
# Figure MUST be redrawn at this point
self._canvas.draw()
def onselect(self, verts):
df = self._parent.get_projection()
if df is not None:
xys = df.as_matrix()
path = Path(verts)
idx = np.nonzero([path.contains_point(xy) for xy in xys])[0]
self._parent.select_rows(idx)
self._canvas.highlight_points(idx)
self._lasso.disconnect_events()
self._canvas.draw_idle()
class MplCanvasListener(object):
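    """Listen for button presses on the canvas and select the row of the
    parent's projected DataFrame nearest to the click, found with a k-d tree
    lookup (Manhattan metric)."""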
def __init__(self, fig_canvas, parent):
self._parent = parent
self._canvas = fig_canvas
self._canvas.figure.canvas.mpl_connect('button_press_event', self.select_rows)
self._parent.projectionTable.modelReset.connect(self.reset_tree)
self.reset_tree()
def reset_tree(self):
df = self._parent.get_projection()
if df is not None:
self.tree = spatial.cKDTree(df[[0, 1]])
def find_nearest(self, x, y):
dist, idx = self.tree.query((x, y), k=1, p=1)
return idx
def select_rows(self, event):
df = self._parent.get_projection()
if df is not None:
idx = self.find_nearest(event.xdata, event.ydata)
self._parent.select_rows(idx)
self._canvas.highlight_points(idx)
self._canvas.draw_idle()
|
mit
|
harterj/moose
|
modules/geochemistry/test/tests/solubilities_and_activities/gypsum_solubility.py
|
9
|
1232
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# Plotting the results of gypsum_solubility and the equivalent GWB simulation
import os
import sys
import matplotlib.pyplot as plt
f = open("gold/gypsum_solubility_out.csv", "r")
data = f.readlines()[2:]
f.close()
cl = [float(line.strip().split(",")[1]) for line in data]
gyp = [float(line.strip().split(",")[2]) for line in data]
gwb_cl_molality = [0.02907, 0.2894, 0.5768, 0.8625, 1.146, 1.428, 1.708, 1.986, 2.261, 2.533, 2.803]
gwb_ca_in_fluid = [0.02386, 0.0417, 0.0559, 0.0682, 0.0796, 0.0904, 0.101, 0.111, 0.121, 0.131, 0.141]
plt.figure()
plt.plot(cl, gyp, 'k-', linewidth = 2.0, label = 'MOOSE')
plt.plot(gwb_cl_molality, gwb_ca_in_fluid, 'rs', markersize = 6.0, label = 'GWB')
plt.legend()
plt.xlabel("Cl molality")
plt.ylabel("Dissolved gypsum (mol)")
plt.title("Gypsum solubility in brine")
plt.savefig("../../../doc/content/media/geochemistry/gypsum_solubility.png")
sys.exit(0)
|
lgpl-2.1
|
cdtait/betfair_pandas
|
samples/sample1.py
|
1
|
6777
|
'''
Created on 4 Nov 2014
@author: obod
'''
from __future__ import print_function
from betfair.models import MarketFilter
from betfair import Betfair
import betfair_pandas as bp
import datetime
import dateutil
import pandas as pd
if __name__ == '__main__':
# ssologin
# To use this you will need app_key,cert_file,username,password
client=Betfair(app_key,cert_file)
client.login(username,password)
# List horse racing event ids
    event_types=bp.list_event_types(client,filter={'textQuery':"Horse Racing"})
    # First 5 UK horse races, win market, from now
country_code='GB'
marketFilter={'eventTypeIds':[event_types.id[0]],
'marketCountries':[country_code],
'marketTypeCodes':["WIN"],
'marketStartTime':{'from':datetime.datetime.now()}}
# First 5 horse races, win market, from now
races=bp.list_market_catalogue(client,
filter=marketFilter,
market_projection=['COMPETITION','EVENT','EVENT_TYPE','MARKET_DESCRIPTION',
'RUNNER_DESCRIPTION','MARKET_START_TIME'],
sort="FIRST_TO_START",
max_results=5
)
# Get a summary set of columns for winHorseRacing from description
summaryDesc=races['description'][['marketId','marketName','event.venue',
'event.name','marketStartTime']]
# Get a summary set of the runners names
summaryRunners=races['runners'][['marketId','selectionId','runnerName']]
    # Join the 2 together based on the marketId to show a summary of the runners in the races together
summaryRaces=pd.merge(summaryDesc,summaryRunners,on='marketId') # First race
summaryDesc.groupby(['marketStartTime','event.venue'])
print('Races:')
for name, group in summaryDesc.groupby(['marketStartTime','event.venue']):
print("{0:s} {1:s} {2:s} {3:%I:%M%p}".format(group.values[0][1],
group.values[0][2],
group.values[0][3],
dateutil.parser.parse(group.values[0][4])
))
# First race
marketId=races['description'].marketId[0]
# First race summary
firstRaceDesc=summaryDesc.query('marketId=="'+marketId+'"')[['marketId','marketName',
'event.name','event.venue','marketStartTime']]
firstRaceRunners=summaryRaces.query('marketId=="'+marketId+'"')[['selectionId','runnerName']]
print(firstRaceDesc)
print(firstRaceRunners)
# All exchange and starting prices to a depth of 2 max price of 20
projection={'priceData':['EX_BEST_OFFERS','SP_AVAILABLE','SP_TRADED'],
'virtualise':False,
'exBestOffersOverrides':{'bestPricesDepth':5L,
'rollupModel':"STAKE",
'rollupLimit':20L},
'rolloverStakes':False
}
# Get all the runners/prices book for this market
# According to the projections
runnersPriceInFirstRace=bp.list_market_book(client,
market_ids=[marketId],
price_projection=projection,
order_projection='ALL',
match_projection='ROLLED_UP_BY_PRICE'
)
# Note the book time
priceTime=datetime.datetime.now()
#
print(runnersPriceInFirstRace['market.book'][['marketId','lastMatchTime',
'totalAvailable','totalMatched',
'numberOfActiveRunners']])
# Runner with the most matched
runnerWithMostTotalMatched=runnersPriceInFirstRace['runners'].sort('totalMatched',ascending=False)
# This is one particular runner id
runnerIdWithMostTotalMatched=runnerWithMostTotalMatched.ix[0,'selectionId']
    # Get the overview price and volume for this selection
runners=runnersPriceInFirstRace['runners']
overview=runners[runners.selectionId==runnerIdWithMostTotalMatched]
# Get the overview price and volume for this selected runner
runnerOverview=pd.merge(overview,summaryRaces[['selectionId','marketId','runnerName']],
on=['selectionId','marketId'])
#
allsp=runnersPriceInFirstRace['runners.sp']
sp=allsp[allsp.selectionId==runnerIdWithMostTotalMatched]
# Show starting price summary
print(sp)
# Show back stake taken
backStakeTaken=runnersPriceInFirstRace['runners.sp.backStakeTaken']
print(backStakeTaken[backStakeTaken.selectionId==runnerIdWithMostTotalMatched])
# Show lay liabilty taken
layLiabilityTaken=runnersPriceInFirstRace['runners.sp.layLiabilityTaken']
print(layLiabilityTaken[layLiabilityTaken.selectionId==runnerIdWithMostTotalMatched])
# Get all lay prices for all the runners in the first race
availableToLay=runnersPriceInFirstRace['runners.ex.availableToLay']
# Get the lay prices for the one with the most total matched
runnerIdWithMostTotalMatchedLayPrices=availableToLay[availableToLay.selectionId == runnerIdWithMostTotalMatched]
# Rename to TotalAvailableToLay
runnerIdWithMostTotalMatchedLayPrices=runnerIdWithMostTotalMatchedLayPrices.rename(
columns={'size': 'LayTotal','price':'LayPrice'})
# Get all back prices for all the runners in the first race
availableToBack=runnersPriceInFirstRace['runners.ex.availableToBack']
# Get the back prices for the one with the most total matched
runnerIdWithMostTotalMatchedBackPrices=availableToBack[availableToBack.selectionId == runnerIdWithMostTotalMatched]
# Rename to TotalAvailableToBack
runnerIdWithMostTotalMatchedBackPrices=runnerIdWithMostTotalMatchedBackPrices.rename(
columns={'size': 'BackTotal', 'price':'BackPrice'})
#
# Merge the prices by appending to make a price ladder
priceLadder=runnerIdWithMostTotalMatchedBackPrices[['BackTotal','BackPrice']].join(
runnerIdWithMostTotalMatchedLayPrices[['LayPrice','LayTotal']])
print("Market:{0:s} {1:s} {2:s} {3:%I:%M%p}".format(firstRaceDesc['marketName'][0],
firstRaceDesc['event.name'][0],
firstRaceDesc['event.venue'][0],
dateutil.parser.parse(firstRaceDesc['marketStartTime'][0]))
)
print("Runner:{0:s} Total {1:f} ".format(runnerOverview['runnerName'][0],
runnerOverview['totalMatched'][0]))
print('Book at {0:s}'.format(priceTime.isoformat(' ')))
print(priceLadder)
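# Hedged aside (toy data, not from the Betfair API): the price ladder built above
# is just the top back offers joined column-wise with the top lay offers for a
# single runner; a self-contained pandas sketch of the same join:
_toyBack=pd.DataFrame({'BackTotal':[120.0,45.0,80.0],'BackPrice':[3.0,2.98,2.96]})
_toyLay=pd.DataFrame({'LayPrice':[3.05,3.1,3.15],'LayTotal':[60.0,150.0,30.0]})
print(_toyBack[['BackTotal','BackPrice']].join(_toyLay[['LayPrice','LayTotal']]))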
#
client.logout()
|
gpl-2.0
|
nan86150/ImageFusion
|
lib/python2.7/site-packages/matplotlib/tests/test_cbook.py
|
9
|
8125
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from datetime import datetime
import numpy as np
from numpy.testing.utils import (assert_array_equal, assert_approx_equal,
assert_array_almost_equal)
from nose.tools import assert_equal, raises, assert_true
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
from matplotlib.cbook import delete_masked_points as dmp
def test_is_string_like():
y = np.arange(10)
assert_equal(cbook.is_string_like(y), False)
y.shape = 10, 1
assert_equal(cbook.is_string_like(y), False)
y.shape = 1, 10
assert_equal(cbook.is_string_like(y), False)
assert cbook.is_string_like("hello world")
assert_equal(cbook.is_string_like(10), False)
def test_restrict_dict():
d = {'foo': 'bar', 1: 2}
d1 = cbook.restrict_dict(d, ['foo', 1])
assert_equal(d1, d)
d2 = cbook.restrict_dict(d, ['bar', 2])
assert_equal(d2, {})
d3 = cbook.restrict_dict(d, {'foo': 1})
assert_equal(d3, {'foo': 'bar'})
d4 = cbook.restrict_dict(d, {})
assert_equal(d4, {})
d5 = cbook.restrict_dict(d, set(['foo', 2]))
assert_equal(d5, {'foo': 'bar'})
# check that d was not modified
assert_equal(d, {'foo': 'bar', 1: 2})
class Test_delete_masked_points:
def setUp(self):
self.mask1 = [False, False, True, True, False, False]
self.arr0 = np.arange(1.0, 7.0)
self.arr1 = [1, 2, 3, np.nan, np.nan, 6]
self.arr2 = np.array(self.arr1)
self.arr3 = np.ma.array(self.arr2, mask=self.mask1)
self.arr_s = ['a', 'b', 'c', 'd', 'e', 'f']
self.arr_s2 = np.array(self.arr_s)
self.arr_dt = [datetime(2008, 1, 1), datetime(2008, 1, 2),
datetime(2008, 1, 3), datetime(2008, 1, 4),
datetime(2008, 1, 5), datetime(2008, 1, 6)]
self.arr_dt2 = np.array(self.arr_dt)
self.arr_colors = ['r', 'g', 'b', 'c', 'm', 'y']
self.arr_rgba = mcolors.colorConverter.to_rgba_array(self.arr_colors)
@raises(ValueError)
def test_bad_first_arg(self):
dmp('a string', self.arr0)
def test_string_seq(self):
actual = dmp(self.arr_s, self.arr1)
ind = [0, 1, 2, 5]
expected = (self.arr_s2.take(ind), self.arr2.take(ind))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_datetime(self):
actual = dmp(self.arr_dt, self.arr3)
ind = [0, 1, 5]
expected = (self.arr_dt2.take(ind),
self.arr3.take(ind).compressed())
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_rgba(self):
actual = dmp(self.arr3, self.arr_rgba)
ind = [0, 1, 5]
expected = (self.arr3.take(ind).compressed(),
self.arr_rgba.take(ind, axis=0))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_allequal():
assert(cbook.allequal([1, 1, 1]))
assert(not cbook.allequal([1, 1, 0]))
assert(cbook.allequal([]))
assert(cbook.allequal(('a', 'a')))
assert(not cbook.allequal(('a', 'b')))
class Test_boxplot_stats:
def setup(self):
np.random.seed(937)
self.nrows = 37
self.ncols = 4
self.data = np.random.lognormal(size=(self.nrows, self.ncols),
mean=1.5, sigma=1.75)
self.known_keys = sorted([
'mean', 'med', 'q1', 'q3', 'iqr',
'cilo', 'cihi', 'whislo', 'whishi',
'fliers', 'label'
])
self.std_results = cbook.boxplot_stats(self.data)
self.known_nonbootstrapped_res = {
'cihi': 6.8161283264444847,
'cilo': -0.1489815330368689,
'iqr': 13.492709959447094,
'mean': 13.00447442387868,
'med': 3.3335733967038079,
'fliers': np.array([
92.55467075, 87.03819018, 42.23204914, 39.29390996
]),
'q1': 1.3597529879465153,
'q3': 14.85246294739361,
'whishi': 27.899688243699629,
'whislo': 0.042143774965502923
}
self.known_bootstrapped_ci = {
'cihi': 8.939577523357828,
'cilo': 1.8692703958676578,
}
self.known_whis3_res = {
'whishi': 42.232049135969874,
'whislo': 0.042143774965502923,
'fliers': np.array([92.55467075, 87.03819018]),
}
self.known_res_percentiles = {
'whislo': 0.1933685896907924,
'whishi': 42.232049135969874
}
self.known_res_range = {
'whislo': 0.042143774965502923,
'whishi': 92.554670752188699
}
def test_form_main_list(self):
assert_true(isinstance(self.std_results, list))
def test_form_each_dict(self):
for res in self.std_results:
assert_true(isinstance(res, dict))
def test_form_dict_keys(self):
for res in self.std_results:
keys = sorted(list(res.keys()))
for key in keys:
assert_true(key in self.known_keys)
def test_results_baseline(self):
res = self.std_results[0]
for key in list(self.known_nonbootstrapped_res.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_nonbootstrapped_res[key]
)
def test_results_bootstrapped(self):
results = cbook.boxplot_stats(self.data, bootstrap=10000)
res = results[0]
for key in list(self.known_bootstrapped_ci.keys()):
assert_approx_equal(
res[key],
self.known_bootstrapped_ci[key]
)
def test_results_whiskers_float(self):
results = cbook.boxplot_stats(self.data, whis=3)
res = results[0]
for key in list(self.known_whis3_res.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_whis3_res[key]
)
def test_results_whiskers_range(self):
results = cbook.boxplot_stats(self.data, whis='range')
res = results[0]
for key in list(self.known_res_range.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_res_range[key]
)
def test_results_whiskers_percentiles(self):
results = cbook.boxplot_stats(self.data, whis=[5, 95])
res = results[0]
for key in list(self.known_res_percentiles.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_res_percentiles[key]
)
def test_results_withlabels(self):
labels = ['Test1', 2, 'ardvark', 4]
results = cbook.boxplot_stats(self.data, labels=labels)
res = results[0]
for lab, res in zip(labels, results):
assert_equal(res['label'], lab)
results = cbook.boxplot_stats(self.data)
for res in results:
assert('label' not in res)
@raises(ValueError)
def test_label_error(self):
labels = [1, 2]
results = cbook.boxplot_stats(self.data, labels=labels)
@raises(ValueError)
def test_bad_dims(self):
data = np.random.normal(size=(34, 34, 34))
results = cbook.boxplot_stats(data)
|
mit
|
orion-42/numerics-physics-stuff
|
tschebyschew.py
|
1
|
2284
|
import numpy as np
import matplotlib.pyplot as plt
def tschebyschow(n):
T1 = np.zeros(n + 1)
T1[0] = 1.0
T2 = np.zeros(n + 1)
T2[1] = 1.0
T = [T1, T2]
for i in range(2, n + 1):
T.append(2*np.roll(T[i - 1], 1) - T[i - 2])
return T
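# Hedged sanity check (not part of the original script): numpy's own Chebyshev
# basis can cross-check the coefficient vectors built by the recurrence
# T_k = 2*x*T_{k-1} - T_{k-2}, where np.roll(.., 1) plays the role of
# multiplying a coefficient vector by x.
from numpy.polynomial import chebyshev as _cheb
for _k, _coeffs in enumerate(tschebyschow(5)):
    assert np.allclose(_coeffs[:_k + 1], _cheb.cheb2poly([0] * _k + [1]))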
def poly_to_fn(poly):
return lambda x: np.sum(poly*x**np.arange(poly.size))
def poly_to_string(poly):
ans = ""
first = True # first term with coeff. != 0?
for i in range(poly.size - 1, -1, -1):
# only display term with a coeff. != 0
# but display a 0 if all terms are 0
if poly[i] != 0.0 or i == 0 and first:
if poly[i] > 0.0:
if not first: # we don't need a + if we are in the first term
ans += " + "
else:
# in the first term we use a - without a sign
# e.g. -x^2 - 2
if first:
ans += "-"
else:
ans += " - "
# we don't want to display a coeff. with 0 decimals as e.g. 1.0 but as 1
# and we already dealt with the sign
if round(poly[i]) == poly[i]:
val = abs(int(round(poly[i])))
else:
val = abs(poly[i])
# display the constant term with only its value
if i == 0:
ans += str(val)
            # omit the exponent for the linear term (x instead of x^1)
elif i == 1:
                # omit the coefficient if it is 1
if val == 1:
ans += "x"
else:
ans += "{}*x".format(val)
else:
                # omit the coefficient if it is 1
if val == 1:
ans += "x^{}".format(i)
else:
ans += "{}*x^{}".format(val, i)
first = False # we had a term != 0
return ans
n = 5
for i, p in enumerate(tschebyschow(n)):
xs = list(np.linspace(-1, 1, 100))
f = poly_to_fn(p)
#plt.plot(xs, map(f, xs), label="n = {}".format(i))
plt.plot(xs, list(map(f, xs)), label=poly_to_string(p))
# plt.plot(xs, map(f, xs))
plt.legend()
plt.title("Tschebyschow Polynomials")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
|
mit
|
pombo-lab/gamtools
|
lib/gamtools/radial_position.py
|
1
|
1622
|
"""
==========================
The radial position module
==========================
The radial position module contains functions for calculating chromatin
radial position from GAM :ref:`segregation tables <segregation_table>`.
"""
import numpy as np
from .segregation import open_segregation
def get_radial_position(segregation_data, no_blanks=False):
"""Get the radial position of each genomic window from a segregation table
:param segregation_data: Segregation table generated by gamtools
:returns: :class:`pandas.DataFrame` giving the radial position of each window
"""
# Get the percentage genome coverage for each NP
cov_per_np = 100 * segregation_data.mean()
def get_window_radial_pos(segregation_row):
"""Internal function that calculates radial position for each row"""
# Which NPs are positive for this window?
nps_with_window = segregation_row.values.astype(bool)
# Get the mean genome coverage of NPs positive for this window
return cov_per_np[nps_with_window].mean()
radial_position = segregation_data.apply(get_window_radial_pos, axis=1)
if no_blanks:
radial_position = radial_position[
np.logical_not(radial_position.isnull())]
return radial_position
def radial_position_from_args(args):
"""Helper function to call get_radial_position from doit"""
segregation_data = open_segregation(args.segregation_file)
radial_position = get_radial_position(segregation_data,
args.no_blanks)
radial_position.to_csv(args.output_file, sep='\t')
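# Hedged usage sketch (illustration only, not part of gamtools): the radial
# position of a window is the mean genome coverage of the NPs that contain it,
# so a tiny boolean segregation table is enough to exercise the function above.
if __name__ == '__main__':
    import pandas as pd
    _toy_segregation = pd.DataFrame(
        [[1, 0, 1],
         [0, 0, 1],
         [1, 1, 1]],
        index=['win1', 'win2', 'win3'],
        columns=['NP_A', 'NP_B', 'NP_C'])
    print(get_radial_position(_toy_segregation))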
|
apache-2.0
|
bzamecnik/ml-playground
|
chord-recognition/convnet_chord_classification_training.py
|
2
|
7659
|
# Chord classification
#
# The task is to classify chords (or more precisely pitch class sets) based on chromagram features.
#
# We use the whole Beatles dataset (i.e. many songs).
#
# The task is in fact multilabel classification, since each pitch class is generally independent.
import numpy as np
import pandas as pd
import matplotlib as mpl
# do not use Qt/X that require $DISPLAY
mpl.use('Agg')
import matplotlib.pyplot as plt
import arrow
import os
import scipy.signal
import scipy.misc
from sklearn.metrics import hamming_loss, accuracy_score, roc_auc_score
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint
## Data loading
dataset_file = '../data/beatles/ml_dataset/block=4096_hop=2048_bins=-48,67_div=1/dataset_2016-05-15.npz'
dataset = np.load(dataset_file)
X_train, Y_train, X_valid, Y_valid, X_test, Y_test = \
dataset['X_train'], dataset['Y_train'], \
dataset['X_valid'], dataset['Y_valid'], \
dataset['X_test'], dataset['Y_test']
## Data preprocessing
### Features
# scaler = MinMaxScaler()
# X = scaler.fit_transform(features).astype('float32')
# let's rescale the features manually so that they're the same in all songs
# the range (in dB) is -120 to X.shape[1] (= 115)
# TODO: there's a bug: should be + 120 on both places!!!
def normalize(X):
return (X.astype('float32') - 120) / (X.shape[1] - 120)
X_train = normalize(X_train)
X_valid = normalize(X_valid)
X_test = normalize(X_test)
for d in [X_train, X_valid, X_test, Y_train, Y_valid, Y_test]:
print(d.shape)
# reshape for 1D convolution
def conv_reshape(X):
return X.reshape(X.shape[0], X.shape[1], 1)
X_conv_train = conv_reshape(X_train)
X_conv_valid = conv_reshape(X_valid)
X_conv_test = conv_reshape(X_test)
## Model training and evaluation
def new_model_id():
return 'model_%s' % arrow.get().format('YYYY-MM-DD-HH-mm-ss')
def save_model_arch(model_id, model):
arch_file = '%s/%s_arch.yaml' % (model_dir, model_id)
print('architecture:', arch_file)
open(arch_file, 'w').write(model.to_yaml())
def weights_file(model_id, suffix=''):
return '%s/%s_weights%s.h5' % (model_dir, model_id, suffix)
def report_model_parameters(model):
print('number of parameters:', model.count_params())
print('weights:', [w.shape for w in model.get_weights()])
# #### Notes
#
# - the last layer has to be sigmoid, not softmax
# - since each output label is independent, multiple labels can be active at the same time
# - very sparse inputs can easily saturate sigmoid activation if it's near the first layer
# - class_mode='binary' for multi-label classification
# - predict_classes() then returns a binary vector
# - loss: MAE or binary_crossentropy?
# - why binary_crossentropy gives worse accuracy than MAE?
# - binary_crossentropy works ok
# - problems with loss going to NAN after the first training iteration
# - optimizer clipnorm doesn't help
# - BatchNormalization doesn't help
# - BatchNormalization between convolution and activation works
# - BatchNormalization might be useful
# - be aware to use scaled inputs, not raw ones
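# Hedged illustration (not part of the original script): with independent pitch
# classes a softmax output cannot mark two classes active at once, while
# element-wise sigmoids can, which is why the network below ends in a sigmoid layer.
_logits = np.array([4.0, 4.0, -4.0])
_softmax = np.exp(_logits) / np.exp(_logits).sum()  # sums to 1, splits the mass
_sigmoid = 1.0 / (1.0 + np.exp(-_logits))  # two units can both be close to 1
print('softmax:', np.round(_softmax, 3), 'sigmoid:', np.round(_sigmoid, 3))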
model_id = new_model_id()
print('model id:', model_id)
model_dir = '../data/beatles/models/' + model_id
os.makedirs(model_dir, exist_ok=True)
model = Sequential()
model.add(Convolution1D(32, 3, input_shape=(X_train.shape[1], 1)))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution1D(32, 3))
model.add(Activation('relu'))
model.add(MaxPooling1D(2, 2))
model.add(Dropout(0.25))
model.add(Convolution1D(64, 3))
# model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution1D(64, 3))
model.add(Activation('relu'))
model.add(MaxPooling1D(2, 2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(12))
model.add(Activation('sigmoid'))
report_model_parameters(model)
print('compiling the model')
model.compile(class_mode='binary', loss='binary_crossentropy', optimizer='adam')
save_model_arch(model_id, model)
print('training the model')
checkpointer = ModelCheckpoint(filepath=weights_file(model_id, '_checkpoint'), verbose=1, save_best_only=True)
epoch_count = 10
batch_size = 512
training_hist = model.fit(
X_conv_train, Y_train,
validation_data=(X_conv_valid, Y_valid),
nb_epoch=epoch_count,
batch_size=batch_size,
callbacks=[checkpointer],
verbose=1)
# There's a problem with the checkpointer: it produces weights with one more layer
# and those weights cannot be easily imported.
model.save_weights(weights_file(model_id, ''))
def report_training_curve(training_hist):
history = training_hist.history
pd.DataFrame(history).to_csv(model_dir+'/'+model_id+'_training_history.tsv', header=True)
plt.figure()
for label in history:
plt.plot(history[label], label=label)
plt.xlabel('epochs')
plt.title('%s - learning curves' % model_id)
plt.suptitle('validation loss: %s' % history['val_loss'][-1])
plt.legend()
plt.savefig(model_dir+'/'+model_id+'_learning_curves.png')
report_training_curve(training_hist)
def model_report_multilabel(model, X_train, Y_train, X_valid, Y_valid):
def report_dataset(X, y_true, title):
y_proba = model.predict_proba(X, batch_size=batch_size)
# multi-label classes with default threshold
y_pred = y_proba >= 0.5
        print(title + ' accuracy (exact match):', accuracy_score(y_true, y_pred))
        print(title + ' hamming score (non-exact match):', 1 - hamming_loss(y_true, y_pred))
        print(title + ' AUC:', roc_auc_score(y_true.flatten(), y_proba.flatten()))
report_dataset(X_train, Y_train, 'training')
report_dataset(X_valid, Y_valid, 'validation')
model_report_multilabel(model, X_conv_train, Y_train, X_conv_valid, Y_valid)
# visualization
def plot_labels(l, title, fifths=False, resample=True, exact=False):
if fifths:
l = l[:,np.arange(12)*7 % 12]
l = l.T
file = model_dir+'/'+model_id+'_'+title+'.png'
if exact:
scipy.misc.imsave(file, l)
else:
if resample:
l = scipy.signal.resample(l, 200, axis=1)
plt.figure(figsize=(20, 2))
plt.imshow(l, cmap='gray', interpolation='none')
plt.tight_layout()
plt.savefig(file)
# # true labels
# plot_labels(labels_pcs, 'true')
# plot_labels(labels_pcs, 'exact_true', exact=True)
#
# # predicted labels
# labels_pred_full = model.predict_classes(conv_reshape(X))
# plot_labels(labels_pred_full, 'pred')
# plot_labels(labels_pred_full, 'exact_pred', exact=True)
#
# # difference
# plot_labels(labels_pcs - labels_pred_full, 'diff')
# plot_labels(labels_pcs - labels_pred_full, 'exact_diff', exact=True)
# plot_labels(labels_pred_full[:100], resample=False)
# plot_labels(labels_pcs[:100] - labels_pred_full[:100], resample=False)
# in case of input features with original time order we can apply median filter:
# medfilt(labels_pred_full, (15, 1))
def plot_labels_true_pred_diff():
def plot2d(x):
plt.imshow(scipy.signal.resample(x.T, 200, axis=1), cmap='gray', interpolation='none')
plt.figure(figsize=(20, 6))
ax = plt.subplot(3,1,1)
plot2d(labels_pcs)
ax.set_title('true')
ax = plt.subplot(3,1,2)
plot2d(labels_pred_full)
ax.set_title('predicted')
ax = plt.subplot(3,1,3)
plot2d(labels_pred_full - labels_pcs)
ax.set_title('difference')
plt.tight_layout()
|
mit
|
dingocuster/scikit-learn
|
sklearn/linear_model/__init__.py
|
270
|
3096
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
|
bsd-3-clause
|
nsoojin/coursera-ml-py
|
machine-learning-ex5/ex5/ex5.py
|
1
|
5989
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import linearRegCostFunction as lrcf
import trainLinearReg as tlr
import learningCurve as lc
import polyFeatures as pf
import featureNormalize as fn
import plotFit as plotft
import validationCurve as vc
plt.ion()
np.set_printoptions(formatter={'float': '{: 0.6f}'.format})
# ===================== Part 1: Loading and Visualizing Data =====================
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment and plot
# the data.
#
# Load Training data
print('Loading and Visualizing data ...')
# Load from ex5data1:
data = scio.loadmat('ex5data1.mat')
X = data['X']
y = data['y'].flatten()
Xval = data['Xval']
yval = data['yval'].flatten()
Xtest = data['Xtest']
ytest = data['ytest'].flatten()
m = y.size
# Plot training data
plt.figure()
plt.scatter(X, y, c='r', marker="x")
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Regularized Linear Regression Cost =====================
# You should now implement the cost function for regularized linear regression
#
theta = np.ones(2)
cost, _ = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1)
print('Cost at theta = [1 1]: {:0.6f}\n(this value should be about 303.993192)'.format(cost))
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Regularized Linear Regression Gradient =====================
# You should now implement the gradient for regularized linear regression
#
theta = np.ones(2)
cost, grad = lrcf.linear_reg_cost_function(theta, np.c_[np.ones(m), X], y, 1)
print('Gradient at theta = [1 1]: {}\n(this value should be about [-15.303016 598.250744])'.format(grad))
input('Program paused. Press ENTER to continue')
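# Hedged reference sketch (the course expects this code in linearRegCostFunction.py;
# the toy version below only illustrates the formula being tested above):
#   J = (1/(2m)) * ||X*theta - y||^2 + (lambda/(2m)) * ||theta[1:]||^2
#   grad = (1/m) * X.T * (X*theta - y), plus (lambda/m)*theta[j] for j >= 1
def _linear_reg_cost_sketch(theta, X, y, lmd):
    m = y.size
    err = X.dot(theta) - y
    cost = err.dot(err) / (2 * m) + lmd * theta[1:].dot(theta[1:]) / (2 * m)
    grad = X.T.dot(err) / m
    grad[1:] += lmd * theta[1:] / m
    return cost, grad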
# ===================== Part 4: Train Linear Regression =====================
# Once you have implemented the cost and gradient correctly, the
# train_linear_reg function will use your cost function to train regularized linear regression.
#
# Write Up Note : The data is non-linear, so this will not give a great fit.
#
# Train linear regression with lambda = 0
lmd = 0
theta = tlr.train_linear_reg(np.c_[np.ones(m), X], y, lmd)
# Plot fit over the data
plt.plot(X, np.dot(np.c_[np.ones(m), X], theta))
input('Program paused. Press ENTER to continue')
# ===================== Part 5: Learning Curve for Linear Regression =====================
# Next, you should implement the learning_curve function.
#
# Write up note : Since the model is underfitting the data, we expect to
# see a graph with "high bias" -- Figure 3 in ex5.pdf
#
lmd = 0
error_train, error_val = lc.learning_curve(np.c_[np.ones(m), X], y, np.c_[np.ones(Xval.shape[0]), Xval], yval, lmd)
plt.figure()
plt.plot(np.arange(m), error_train, np.arange(m), error_val)
plt.title('Learning Curve for Linear Regression')
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('Number of Training Examples')
plt.ylabel('Error')
plt.axis([0, 13, 0, 150])
input('Program paused. Press ENTER to continue')
# ===================== Part 6 : Feature Mapping for Polynomial Regression =====================
# One solution to this is to use polynomial regression. You should now
# complete polyFeatures to map each example into its powers
#
p = 5
# Map X onto Polynomial Features and Normalize
X_poly = pf.poly_features(X, p)
X_poly, mu, sigma = fn.feature_normalize(X_poly)
X_poly = np.c_[np.ones(m), X_poly]
# Map X_poly_test and normalize (using mu and sigma)
X_poly_test = pf.poly_features(Xtest, p)
X_poly_test -= mu
X_poly_test /= sigma
X_poly_test = np.c_[np.ones(X_poly_test.shape[0]), X_poly_test]
# Map X_poly_val and normalize (using mu and sigma)
X_poly_val = pf.poly_features(Xval, p)
X_poly_val -= mu
X_poly_val /= sigma
X_poly_val = np.c_[np.ones(X_poly_val.shape[0]), X_poly_val]
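# Note (hedged): the validation and test features are intentionally normalized
# with the training-set mu and sigma computed above, so all three sets share
# the same feature scaling.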
print('Normalized Training Example 1 : \n{}'.format(X_poly[0]))
input('Program paused. Press ENTER to continue')
# ===================== Part 7 : Learning Curve for Polynomial Regression =====================
# Now, you will get to experiment with polynomial regression with multiple
# values of lambda. The code below runs polynomial regression with
# lambda = 0. You should try running the code with different values of
# lambda to see how the fit and learning curve change.
#
lmd = 0
theta = tlr.train_linear_reg(X_poly, y, lmd)
# Plot training data and fit
plt.figure()
plt.scatter(X, y, c='r', marker="x")
plotft.plot_fit(np.min(X), np.max(X), mu, sigma, theta, p)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.ylim([0, 60])
plt.title('Polynomial Regression Fit (lambda = {})'.format(lmd))
error_train, error_val = lc.learning_curve(X_poly, y, X_poly_val, yval, lmd)
plt.figure()
plt.plot(np.arange(m), error_train, np.arange(m), error_val)
plt.title('Polynomial Regression Learning Curve (lambda = {})'.format(lmd))
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('Number of Training Examples')
plt.ylabel('Error')
plt.axis([0, 13, 0, 150])
print('Polynomial Regression (lambda = {})'.format(lmd))
print('# Training Examples\tTrain Error\t\tCross Validation Error')
for i in range(m):
print(' \t{}\t\t{}\t{}'.format(i, error_train[i], error_val[i]))
input('Program paused. Press ENTER to continue')
# ===================== Part 8 : Validation for Selecting Lambda =====================
# You will now implement validationCurve to test various values of
# lambda on a validation set. You will then use this to select the
# 'best' lambda value.
lambda_vec, error_train, error_val = vc.validation_curve(X_poly, y, X_poly_val, yval)
plt.figure()
plt.plot(lambda_vec, error_train, lambda_vec, error_val)
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('lambda')
plt.ylabel('Error')
input('ex5 Finished. Press ENTER to exit')
|
mit
|
ndingwall/scikit-learn
|
examples/linear_model/plot_nnls.py
|
15
|
2019
|
"""
==========================
Non-negative least squares
==========================
In this example, we fit a linear model with positive constraints on the
regression coefficients and compare the estimated coefficients to a classic
linear regression.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# %%
# Generate some random data
np.random.seed(42)
n_samples, n_features = 200, 50
X = np.random.randn(n_samples, n_features)
true_coef = 3 * np.random.randn(n_features)
# Threshold coefficients to render them non-negative
true_coef[true_coef < 0] = 0
y = np.dot(X, true_coef)
# Add some noise
y += 5 * np.random.normal(size=(n_samples, ))
# %%
# Split the data in train set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# %%
# Fit the Non-Negative least squares.
from sklearn.linear_model import LinearRegression
reg_nnls = LinearRegression(positive=True)
y_pred_nnls = reg_nnls.fit(X_train, y_train).predict(X_test)
r2_score_nnls = r2_score(y_test, y_pred_nnls)
print("NNLS R2 score", r2_score_nnls)
# %%
# Fit an OLS.
reg_ols = LinearRegression()
y_pred_ols = reg_ols.fit(X_train, y_train).predict(X_test)
r2_score_ols = r2_score(y_test, y_pred_ols)
print("OLS R2 score", r2_score_ols)
# %%
# Comparing the regression coefficients between OLS and NNLS, we can observe
# they are highly correlated (the dashed line is the identity relation),
# but the non-negative constraint shrinks some to 0.
# The non-negative least squares fit inherently yields sparse results.
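# Hedged aside (not part of the upstream example): the sparsity claim above can be
# checked directly by counting NNLS coefficients that ended up at (numerically) zero.
n_zero_coefs = int(np.sum(np.isclose(reg_nnls.coef_, 0.0)))
print("NNLS coefficients at zero:", n_zero_coefs, "of", reg_nnls.coef_.size)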
fig, ax = plt.subplots()
ax.plot(reg_ols.coef_, reg_nnls.coef_, linewidth=0, marker=".")
low_x, high_x = ax.get_xlim()
low_y, high_y = ax.get_ylim()
low = max(low_x, low_y)
high = min(high_x, high_y)
ax.plot([low, high], [low, high], ls="--", c=".3", alpha=.5)
ax.set_xlabel("OLS regression coefficients", fontweight="bold")
ax.set_ylabel("NNLS regression coefficients", fontweight="bold")
|
bsd-3-clause
|
Intel-Corporation/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
|
39
|
32726
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
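# Hedged illustration (not part of the original module): for a dense
# [n_samples, n_features] input with 3 classes the helper above prepends the batch
# dimension to both shapes, e.g. (100, 8) features and (100,) labels map to
# [32, 8] inputs and [32, 3] one-hot outputs when batch_size=32.
assert _get_in_out_shape((100, 8), (100,), 3, batch_size=32) == ([32, 8], [32, 3], 32)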
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or the same type as y. If `y`
      is a `dict` (or an iterable which returns dicts), `n_classes[key]` gives
      the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
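# Hedged illustration (not part of the original module): run directly, this shows
# the batching rule used above, i.e. ceil(len(x) / batch_size) slices, so 10 rows
# with batch_size=4 come back as parts of 4, 4 and 2 rows.
if __name__ == '__main__':
  _parts = setup_predict_data_feeder(np.arange(10.0), batch_size=4)
  print([len(_p) for _p in _parts])  # expected: [4, 4, 2]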
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(
x, dict), y is not None and isinstance(y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (dict(
[(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (_check_dtype(self._y.dtype)
if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
if len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(
self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else {
self._input_placeholder.name:
extract(self._x, batch_indices)
})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = (self.output_shape, self._output_dtype,
self.n_classes)
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It's customary to have these iterators rotate infinitely
  over the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = (
[1] + list(y_first_el[0].shape
if isinstance(y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict(
[(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder removes the requirement to keep the full dataset in
  memory and still allows random seeks for sampling of batches.
"""
@deprecated(None, 'Please feed input to tf.data to support dask.')
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.placeholder for input features mini batch.
output_placeholder: tf.placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
|
apache-2.0
|
jcasner/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py
|
69
|
2207
|
"""
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.ctx = pixmap.cairo_create()
            self.ctx.save() # restore, save - when new_gc() is called
else:
def set_pixmap (self, pixmap):
self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
            self.ctx.save() # restore, save - when new_gc() is called
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
        # must be initialized after the window, drawingArea and figure
        # attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
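# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# On PyGTK-era matplotlib, this backend would typically be selected from user
# code roughly as follows; availability depends on the installed
# matplotlib/PyGTK versions.
#
# import matplotlib
# matplotlib.use('GTKCairo')   # must be called before importing pyplot
# import matplotlib.pyplot as plt
# plt.plot([1, 2, 3])
# plt.show()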
|
agpl-3.0
|
tgtubbs/cbb_database
|
scrapers/teams.py
|
1
|
1043
|
from bs4 import BeautifulSoup
import pandas
import requests
# scrape bbref team ids and school index
url = "http://www.sports-reference.com/cbb/schools/"
soup = BeautifulSoup(requests.get(url).text)
team_hrefs = [a["href"] for a in soup.find_all("tbody")[0].find_all("a")]
bbref_ids = [href[13:-1] for href in team_hrefs]
table_headers = [th.text for th in soup.find_all("tbody")[0].find_all("th")][1:18]
table_rows = soup.find_all("tbody")[0].find_all("tr", attrs={"class": ""})
row_data = [[td.text for td in table_rows[i].find_all("td")[1:]] for i in range(0, len(table_rows))]
# fill dataframe
team_index_df = pandas.DataFrame(row_data, columns=table_headers)
team_index_df["bbref_id"] = bbref_ids
column_names = team_index_df.columns.tolist()
column_names = column_names[-1:] + column_names[:-1]
team_index_df = team_index_df[column_names]
# Some team_index_df["City, State"] data are incorrect. Modify manually
# before running geocoder.
team_index_df.to_csv("/Users/travistubbs/cbb_database/data/teams.txt", sep="\t", index=False)
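# --- Hedged usage sketch (added by the editor, not part of the original script) ---
# Reading the exported table back for downstream processing; the path mirrors
# the hard-coded output path above and may need adjusting on other machines.
#
# teams = pandas.read_csv("/Users/travistubbs/cbb_database/data/teams.txt", sep="\t")
# print(teams["bbref_id"].head())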
|
mit
|
466152112/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
142
|
6276
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    # Check that sampling the full digits dataset does not produce NaNs.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
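# --- Hedged usage sketch (added by the editor, not part of the original test module) ---
# A minimal fit/transform round trip on the scaled digits data prepared above;
# the hyperparameters are illustrative only.
def _example_rbm_feature_extraction():
    rbm = BernoulliRBM(n_components=32, learning_rate=0.05, n_iter=5,
                       random_state=0)
    hidden = rbm.fit_transform(Xdigits[:200])
    return hidden.shape  # (200, 32)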
|
bsd-3-clause
|
jonyroda97/redbot-amigosprovaveis
|
lib/matplotlib/backends/backend_qt5agg.py
|
2
|
6829
|
"""
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import ctypes
import traceback
from matplotlib import cbook
from matplotlib.transforms import Bbox
from .backend_agg import FigureCanvasAgg
from .backend_qt5 import (
QtCore, QtGui, QtWidgets, _BackendQT5, FigureCanvasQT, FigureManagerQT,
NavigationToolbar2QT, backend_version)
from .qt_compat import QT_API
class FigureCanvasQTAggBase(FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def __init__(self, figure):
super(FigureCanvasQTAggBase, self).__init__(figure=figure)
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
self._agg_draw_pending = False
self._bbox_queue = []
self._drawRect = None
def drawRectangle(self, rect):
if rect is not None:
self._drawRect = [pt / self._dpi_ratio for pt in rect]
else:
self._drawRect = None
self.update()
@property
@cbook.deprecated("2.1")
def blitbox(self):
return self._bbox_queue
def paintEvent(self, e):
"""Copy the image from the Agg canvas to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
# if there is a pending draw, run it now as we need the updated render
# to paint the widget
if self._agg_draw_pending:
self.__draw_idle_agg()
# As described in __init__ above, we need to be careful in cases with
# mixed resolution displays if dpi_ratio is changing between painting
# events.
if self._dpi_ratio != self._dpi_ratio_prev:
# We need to update the figure DPI
self._update_figure_dpi()
self._dpi_ratio_prev = self._dpi_ratio
# The easiest way to resize the canvas is to emit a resizeEvent
# since we implement all the logic for resizing the canvas for
# that event.
event = QtGui.QResizeEvent(self.size(), self.size())
# We use self.resizeEvent here instead of QApplication.postEvent
# since the latter doesn't guarantee that the event will be emitted
# straight away, and this causes visual delays in the changes.
self.resizeEvent(event)
# resizeEvent triggers a paintEvent itself, so we exit this one.
return
# if the canvas does not have a renderer, then give up and wait for
# FigureCanvasAgg.draw(self) to be called
if not hasattr(self, 'renderer'):
return
painter = QtGui.QPainter(self)
if self._bbox_queue:
bbox_queue = self._bbox_queue
else:
painter.eraseRect(self.rect())
bbox_queue = [
Bbox([[0, 0], [self.renderer.width, self.renderer.height]])]
self._bbox_queue = []
for bbox in bbox_queue:
l, b, r, t = map(int, bbox.extents)
w = r - l
h = t - b
reg = self.copy_from_bbox(bbox)
buf = reg.to_string_argb()
qimage = QtGui.QImage(buf, w, h, QtGui.QImage.Format_ARGB32)
if hasattr(qimage, 'setDevicePixelRatio'):
# Not available on Qt4 or some older Qt5.
qimage.setDevicePixelRatio(self._dpi_ratio)
origin = QtCore.QPoint(l, self.renderer.height - t)
painter.drawImage(origin / self._dpi_ratio, qimage)
# Adjust the buf reference count to work around a memory
# leak bug in QImage under PySide on Python 3.
if QT_API == 'PySide' and six.PY3:
ctypes.c_long.from_address(id(buf)).value = 1
# draw the zoom rectangle to the QPainter
if self._drawRect is not None:
pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,
QtCore.Qt.DotLine)
painter.setPen(pen)
x, y, w, h = self._drawRect
painter.drawRect(x, y, w, h)
painter.end()
def draw(self):
"""Draw the figure with Agg, and queue a request for a Qt draw.
"""
# The Agg draw is done here; delaying causes problems with code that
# uses the result of the draw() to update plot elements.
super(FigureCanvasQTAggBase, self).draw()
self.update()
def draw_idle(self):
"""Queue redraw of the Agg buffer and request Qt paintEvent.
"""
# The Agg draw needs to be handled by the same thread matplotlib
# modifies the scene graph from. Post Agg draw request to the
# current event loop in order to ensure thread affinity and to
# accumulate multiple draw requests from event handling.
# TODO: queued signal connection might be safer than singleShot
if not self._agg_draw_pending:
self._agg_draw_pending = True
QtCore.QTimer.singleShot(0, self.__draw_idle_agg)
def __draw_idle_agg(self, *args):
if not self._agg_draw_pending:
return
if self.height() < 0 or self.width() < 0:
self._agg_draw_pending = False
return
try:
self.draw()
except Exception:
# Uncaught exceptions are fatal for PyQt5, so catch them instead.
traceback.print_exc()
finally:
self._agg_draw_pending = False
def blit(self, bbox=None):
"""Blit the region in bbox.
"""
# If bbox is None, blit the entire canvas. Otherwise
# blit only the area defined by the bbox.
if bbox is None and self.figure:
bbox = self.figure.bbox
self._bbox_queue.append(bbox)
# repaint uses logical pixels, not physical pixels like the renderer.
l, b, w, h = [pt / self._dpi_ratio for pt in bbox.bounds]
t = b + h
self.repaint(l, self.renderer.height / self._dpi_ratio - t, w, h)
def print_figure(self, *args, **kwargs):
super(FigureCanvasQTAggBase, self).print_figure(*args, **kwargs)
self.draw()
class FigureCanvasQTAgg(FigureCanvasQTAggBase, FigureCanvasQT):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc.
Modified to import from Qt5 backend for new-style mouse events.
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
@_BackendQT5.export
class _BackendQT5Agg(_BackendQT5):
FigureCanvas = FigureCanvasQTAgg
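# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Selecting this backend from user code requires a working PyQt5/PySide
# installation; 'Qt5Agg' is the standard matplotlib backend name.
#
# import matplotlib
# matplotlib.use('Qt5Agg')   # must be called before importing pyplot
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# ax.plot([0, 1], [0, 1])
# plt.show()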
|
gpl-3.0
|
apache/incubator-superset
|
superset/examples/bart_lines.py
|
3
|
2172
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pandas as pd
import polyline
from sqlalchemy import String, Text
from superset import db
from superset.utils.core import get_example_database
from .helpers import get_example_data, TBL
def load_bart_lines(only_metadata: bool = False, force: bool = False) -> None:
tbl_name = "bart_lines"
database = get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
content = get_example_data("bart-lines.json.gz")
df = pd.read_json(content, encoding="latin-1")
df["path_json"] = df.path.map(json.dumps)
df["polyline"] = df.path.map(polyline.encode)
del df["path"]
df.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"color": String(255),
"name": String(255),
"polyline": Text,
"path_json": Text,
},
index=False,
)
print("Creating table {} reference".format(tbl_name))
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "BART lines"
tbl.database = database
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
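# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Typical invocation from Superset's example-loading machinery; the flags
# mirror the signature of load_bart_lines above.
#
# load_bart_lines(only_metadata=False, force=True)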
|
apache-2.0
|
zafarali/emdp
|
tests/test_plotting.py
|
1
|
1124
|
"""Integration tests for plotting tools."""
from emdp import examples
from emdp.gridworld import GridWorldPlotter
from emdp import actions
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def test_plotting_integration():
mdp = examples.build_SB_example35()
trajectories = []
for _ in range(3): # 3 trajectories
trajectory = [mdp.reset()]
for _ in range(10): # 10 steps maximum
state, reward, done, info = mdp.step(random.sample([actions.LEFT, actions.RIGHT,
actions.UP, actions.DOWN], 1)[0])
trajectory.append(state)
trajectories.append(trajectory)
gwp = GridWorldPlotter(mdp.size,
mdp.has_absorbing_state) # alternatively you can use GridWorldPlotter.from_mdp(mdp)
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(121)
# trajectory
gwp.plot_trajectories(ax, trajectories)
gwp.plot_grid(ax)
# heatmap
ax = fig.add_subplot(122)
gwp.plot_heatmap(ax, trajectories)
gwp.plot_grid(ax)
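# --- Hedged usage sketch (added by the editor, not part of the original test) ---
# As noted in the comment above, the plotter can also be built directly from
# the MDP; the resulting object is used exactly like `gwp` in the test.
#
# gwp = GridWorldPlotter.from_mdp(mdp)
# gwp.plot_grid(ax)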
|
mit
|
kyleabeauchamp/mdtraj
|
mdtraj/utils/validation.py
|
1
|
7987
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# imports
##############################################################################
from __future__ import print_function, division
import warnings
import numbers
import numpy as np
import collections
from mdtraj.utils.six.moves import zip_longest
##############################################################################
# functions / classes
##############################################################################
class TypeCastPerformanceWarning(RuntimeWarning):
pass
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
    val : {np.ndarray, None}
The array to check
dtype : {nd.dtype, str}
The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
        What should the shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
    add_newaxis_on_deficient_ndim : bool, default=False
        Add a new axis to the beginning of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
        it'll be cast to the right dtype. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
if isinstance(val, collections.Iterable):
# If they give us an iterator, let's try...
if isinstance(val, collections.Sequence):
# sequences are easy. these are like lists and stuff
val = np.array(val, dtype=dtype)
else:
# this is a generator...
val = np.array(list(val), dtype=dtype)
elif np.isscalar(val) and add_newaxis_on_deficient_ndim and ndim == 1:
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
        # the shape spec given by the user can look like (None, None, 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
        sentinel = object()
        error = ValueError(("%s must be shape %s. You supplied "
                            "%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
        for a, b in zip_longest(val.shape, shape, fillvalue=sentinel):
            if a is sentinel or b is sentinel:
                # if the sentinel was reached, it means that the ndim didn't
                # match or something. This really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val
def cast_indices(indices):
"""Check that ``indices`` are appropriate for indexing an array
Parameters
----------
indices : {None, array_like, slice}
If indices is None or slice, it'll just pass through. Otherwise, it'll
be converted to a numpy array and checked to make sure it contains
unique integers.
Returns
-------
value : {slice, np.ndarray}
Either a slice or an array of integers, depending on the input type
"""
if indices is None or isinstance(indices, slice):
return indices
if not len(indices) == len(set(indices)):
raise ValueError("indices must be unique.")
out = np.asarray(indices)
if not issubclass(out.dtype.type, np.integer):
raise ValueError('indices must be of an integer type. %s is not an integer type' % out.dtype)
return out
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Notes
-----
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
    # This code is directly from the scikit-learn project (sklearn/utils/validation.py)
# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)
# License: BSD 3 clause
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
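# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# ensure_type casting a nested list to a contiguous float32 array, and
# check_random_state normalizing an integer seed; argument names follow the
# signatures defined above.
#
# xyz = ensure_type([[1.0, 2.0, 3.0]], dtype=np.float32, ndim=2,
#                   name='xyz', shape=(None, 3))
# rng = check_random_state(42)
# print(xyz.dtype, xyz.shape, rng.rand())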
|
lgpl-2.1
|
Srisai85/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
105
|
26588
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined fold, as the generated folds would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight of type dict is
    # provided for a multiclass problem. However, it can handle
    # binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
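# --- Hedged usage sketch (added by the editor, not part of the original test module) ---
# Minimal fit/predict round trip on the toy data defined at the top of this
# module; hyperparameters are illustrative only.
def _example_logistic_regression_usage():
    clf = LogisticRegression(C=1.0, random_state=0)
    clf.fit(X, Y1)
    print(clf.predict(X))         # should reproduce Y1 on this tiny separable set
    print(clf.predict_proba(X))   # per-class probabilities; each row sums to 1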
|
bsd-3-clause
|
heli522/scikit-learn
|
sklearn/utils/tests/test_murmurhash.py
|
261
|
2836
|
# Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
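# --- Hedged usage sketch (added by the editor, not part of the original test module) ---
# Bucketing byte-string keys with murmurhash3_32, as done in feature hashing;
# positive=True matches its use in the tests above.
def _example_murmurhash_bucketing(key=b'some-key', n_buckets=8):
    return murmurhash3_32(key, seed=0, positive=True) % n_buckets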
|
bsd-3-clause
|
m4rx9/rna-pdb-tools
|
rna_tools/Seq.py
|
1
|
27464
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""RNA Sequence with secondary structure prediction methods.
This tool takes a given sequence and returns the secondary structure prediction provided by 5 different tools: RNAfold, RNAsubopt, ipknot, contextfold and centroid_fold. You must have these tools installed. You don't have to install all tools if you want to use only one of the methods.
It's easy to add more methods of your choice to this class.
Installation
~~~~~~~~~~~~~
Depends on what tools you want to use, follow the instructions below.
ContextFold
^^^^^^^^^^^^^^^^^^^^^
https://www.cs.bgu.ac.il/~negevcb/contextfold/
needs Java. Try this on Ubuntu 14.04: https://askubuntu.com/questions/521145/how-to-install-oracle-java-on-ubuntu-14-04 Single chain only!
ViennaRNA
^^^^^^^^^^^^^^
https://www.tbi.univie.ac.at/RNA/
For OSX install from the binary Installer from the page.
ipknot OSX
^^^^^^^^^^^^^
https://github.com/satoken/homebrew-rnatools
If one encounters a problem::
[mm] Desktop$ /usr/local/opt/bin/ipknot
dyld: Library not loaded: /usr/local/opt/glpk/lib/libglpk.40.dylib
Referenced from: /usr/local/opt/bin/ipknot
Reason: image not found
[1] 51654 abort /usr/local/opt/bin/ipknot
the solution is::
brew install glpk # on OSX
RNA Structure
^^^^^^^^^^^^^
http://rna.urmc.rochester.edu/
Works with 5.8.1; Jun 16, 2016.
Download http://rna.urmc.rochester.edu/RNAstructureDownload.html and untar it in ``<RNA_PDB_TOOLS>/opt/RNAstructure/``; run make, and the tools will be compiled into a folder ``exe``.
Set up ``DATAPATH`` in your bashrc to ``<RNA_PDB_TOOLS>/opt/RNAstructure/data_tables``, e.g. ``DATAPATH=/home/magnus/work/src/rna-pdb-tools/opt/RNAstructure/data_tables/`` (read more: http://rna.urmc.rochester.edu/Text/Thermodynamics.html).
RNAstructure can be run with SHAPE restraints; read more about the format at http://rna.urmc.rochester.edu/Text/File_Formats.html#Constraint.
The file format for SHAPE reactivity comprises two columns: the first column is the nucleotide number, and the second is the reactivity.
Nucleotides for which there is no SHAPE data can either be left out of the file, or the reactivity can be entered as less than -500. Columns are separated by any white space.
MC-Sym
^^^^^^^^^^^^^
FAQ
~~~~~~~~~~~~~
- Does it work for more than one chain??? Hmm.. I think it does not. You have to check on your own. --magnus
TIPS
~~~~~~~~~~~~~
Should you need to run it on a list of sequences, use the following script::
from rna_tools import Seq
f = open("listOfSequences.fasta")
for line in f:
        if line.startswith('>'):
            print(line, end='')
        else:
            print(line, end='')
            s = Seq.Seq(line.strip())  # module first Seq and class second Seq; without strip this has two lines
            print(s.predict_ss(method="contextfold"), end='')
            # print(s.predict_ss(method="centroid_fold"))
TODO
~~~~~~~~~~~~~
- This class should be renamed to RNASeq and merged with the RNASeq class from RNAalignment
""" # noqa
import os
import subprocess
import tempfile
import sys
from rna_tools.SecondaryStructure import parse_vienna_to_pairs
from rna_tools.rna_tools_config import CONTEXTFOLD_PATH, RNASTRUCTURE_PATH, ENTRNA_PATH, IPKNOT_PATH
class MethodNotChosen(Exception):
pass
class RNASequence(object):
"""RNASequence.
Usage::
>>> seq = RNASequence("CCCCUUUUGGGG")
>>> seq.name = 'RNA03'
>>> print(seq.predict_ss("RNAfold", constraints="((((....))))"))
>RNA03
CCCCUUUUGGGG
((((....)))) ( -6.40)
"""
def __init__(self, seq, ss='', name='rna_seq'):
self.seq = seq
self.ss = ss
self.ss_log = ''
self.name = name
def __repr__(self):
return self.name + '\n' + self.seq + '\n' + self.ss
def eval(self, ss='', no_dangling_end_energies=False, verbose=False):
"""Evaluate energy of RNA sequence.
Args:
            ss (optional): if not set, then self.ss is used for the calculation
no_dangling_end_energies (Boolean)
verbose (Boolean)
Returns:
Energy (float)
        The RNAeval web server calculates the energy of an RNA sequence on a given secondary structure.
You can use it to get a detailed thermodynamic description (loop free-energy decomposition) of your RNA structures.
Simply paste or upload your sequence below and click Proceed. To get more information on the meaning of the options click the help symbols. You can test the server using this sample sequence/structure pair.
An equivalent RNAeval command line call would have been::
RNAeval -v -d0 < input.txt
Read more: <http://rna.tbi.univie.ac.at//cgi-bin/RNAWebSuite/RNAeval.cgi>
"""
tf = tempfile.NamedTemporaryFile(delete=False)
if not ss:
ss = self.ss
tf.name += '.fa'
with open(tf.name, 'w') as f:
f.write('>' + self.name + '\n')
f.write(self.seq + '\n')
f.write(ss + '\n')
dopt = ' -d2 '
if no_dangling_end_energies:
dopt = ' -d0 '
cmd = 'RNAeval ' + dopt + ' < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
# [u'>rna_seq\nGGCAGGGGCGCUUCGGCCCCCUAUGCC\n((((((((.((....)).)))).))))', u'(-13.50)']
return float(self.ss_log.strip().split(' ')[-1].replace('(','').replace(')', ''))
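    # --- Hedged usage sketch (added by the editor, not part of the original class) ---
    # Using eval() to score a sequence on a given secondary structure; requires
    # a local RNAeval (ViennaRNA) installation.
    #
    # seq = RNASequence("CCCCUUUUGGGG", ss="((((....))))")
    # print(seq.eval())  # free energy in kcal/mol, e.g. -6.40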
def get_foldability(self, ss='', verbose=False):
"""Calculate foldability based on EntRNA.
Steps:
- parse SS into basepairs,
        - calculate foldability
Configuration:
- Set ENTRNA_PATH to the folder where ENTRNA_predict.py is.
Cmd wrapper in here::
python ENTRNA_predict.py --seq_file pseudoknotted_seq.txt --str_file pseudoknotted_str.txt
Su, C., Weir, J. D., Zhang, F., Yan, H., & Wu, T. (2019).
ENTRNA: a framework to predict RNA foldability. BMC Bioinformatics, 20(1), 1–11.
http://doi.org/10.1186/s12859-019-2948-5
"""
if ss:
self.ss = ss
# parse SS into base-pairs
def dp_to_bp(dp):
import numpy as np
a_list = []
bp_array = np.zeros(len(dp),dtype = int)
for i in range(len(dp)):
if dp[i] == "(":
a_list.append(i)
if dp[i] == ")":
bp_array[i] = a_list[-1] + 1
bp_array[a_list[-1]] = i + 1
a_list.pop()
return list(bp_array)
bp = dp_to_bp(self.ss)
if verbose: print(bp)
tempstr = tempfile.NamedTemporaryFile(delete=False)
with open(tempstr.name, 'w') as f:
f.write(str(bp))
tempseq = tempfile.NamedTemporaryFile(delete=False)
with open(tempseq.name, 'w') as f:
f.write(self.seq)
# -W to silent warnings See [1]
cmd = "cd " + ENTRNA_PATH + " && python -W ignore ENTRNA_predict.py --seq_file " + tempseq.name + " --str_file " + tempstr.name
log = subprocess.check_output(cmd, shell=True).decode()
if verbose:
print(cmd)
print(log)
for l in log.split('\n'):
if l.startswith('Foldability: '):
return round(float(l.replace('Foldability: ', '')), 2)
return -1
## [1]:
## /Users/magnus/work/evoClustRNA/rna-foldability/ENTRNA/util/pseudoknot_free.py:22: SettingWithCopyWarning:
## A value is trying to be set on a copy of a slice from a DataFrame.
## Try using .loc[row_indexer,col_indexer] = value instead
## See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
## df_v1['length'] = df_v1['seq'].apply(lambda x:len(x))
## /home/magnus/miniconda2/lib/python2.7/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
## FutureWarning)
## cd /Users/magnus/work/evoClustRNA/rna-foldability/ENTRNA/ && python ENTRNA_predict.py --seq_file /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmpUORegp --str_file /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmp1ERCcD
def predict_ss(self, method="RNAfold", constraints='', enforce_constraint=False, shapefn='', explore='', verbose=0, path=''):
"""Predict secondary structure of the seq.
Args:
method:
            constraints:
            shapefn (str): path to a file with shape reactivities
verbose (boolean)
It creates a seq fasta file and runs various methods for secondary structure
        prediction. You can also provide a constraints file for RNAfold and RNAsubopt.
        Methods that can be used with constraints: RNAsubopt, RNAfold, mcfold.
        Methods that can be used with SHAPE constraints: RNAfold.
**ContextFold**
Example::
$ java -cp bin contextFold.app.Predict in:CCCCUUUGGGGG
CCCCUUUGGGGG
((((....))))
It seems that a seq has to be longer than 9. Otherwise::
$ java -cp bin contextFold.app.Predict in:UUUUUUGGG
Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: 10
# this is OK
$ java -cp bin contextFold.app.Predict in:CCCCUUUGGG
CCCCUUUGGG
.(((...)))
**RNAstructure**
Example::
>>> seq = RNASequence("GGGGUUUUCCC")
>>> print(seq.predict_ss("rnastructure"))
> ENERGY = -4.4 rna_seq
GGGGUUUUCCC
((((...))))
and with the shape data::
>>> print(seq.predict_ss("rnastructure", shapefn="data/shape.txt"))
> ENERGY = -0.2 rna_seq
GGGGUUUUCCC
.(((....)))
the shape data::
1 10
2 1
3 1
You can easily see that the first G is unpaired right now! The reactivity of this G was
set to 10. Worked!
**MC-Fold**
MC-Fold uses the online version of the tool; this is very powerful with constraints::
rna_seq
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -53.91
(-53.91, '((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))')
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((........)))).......((((..............((((((((((..............))))))))))..))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -34.77
(-34.77, '((((........)))).......((((..............((((((((((..............))))))))))..))))')
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((xxxxxxxx))))xxxxxxx((((xxxxxxxxxxxxxx((((((((((xxxxxxxxxxxxxx))))))))))xx))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -34.77
(-34.77, '((((........)))).......((((..............((((((((((..............))))))))))..))))')
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((********))))*******((((**************((((((((((**************))))))))))**))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -77.30
(-71.12, '(((((((..))))))).......((((((.(((...)))..(((((((((((((((....)))))))))))))))))))))')
acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg
((((........)))).......((((..............(((((((((((((((....)))))))))))))))..))))
curl -Y 0 -y 300 -F "pass=lucy" -F mask="((((**[[[[[**))))*******((((****]]]]]****(((((((((((((((****)))))))))))))))**))))" -F sequence="acucggcuaggcgaguauaaauagccgucaggccuagcgcguccaagccuagccccuucuggggcugggcgaagggucggg" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
mcfold::energy best dynamics programming: -77.30
('-77.30', '((((**[[[[[**))))*******((((****]]]]]****(((((((((((((((****)))))))))))))))**))))')
**explore**
The sub-optimal search space can be constrained within a percentage of the minimum free energy structure, as MC-fold makes use of the Waterman-Byers algorithm [18, 19]. Because the exploration has an exponential time complexity, increasing this value can have a dramatic effect on MC-Fold’s run time.
Parisien, M., & Major, F. (2009). RNA Modeling Using the MC-Fold and MC-Sym Pipeline [Manual] (pp. 1–84).
"""
tf = tempfile.NamedTemporaryFile(delete=False)
tf.name += '.fa'
with open(tf.name, 'w') as f:
f.write('>' + self.name + '\n')
f.write(self.seq + '\n')
if constraints:
f.write(constraints)
# check for seq and constraints
if constraints:
if len(self.seq) != len(constraints):
raise Exception('The seq and constraints should be of the same length: %i %s %i %s' % (len(self.seq), self.seq, len(constraints), constraints))
# run prediction
# RNAfold with constraints
if method == "RNAfold" and constraints:
cmd = 'RNAfold -C < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.strip().split('\n')[:])
if method == "RNAfoldX" and constraints:
if enforce_constraint:
cmd = 'RNAfold -p -d2 --noLP -C --enforceConstraint < ' + tf.name
else:
cmd = 'RNAfold -p -d2 --noLP -C < ' + tf.name
if verbose:
print(cmd)
try:
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
except subprocess.CalledProcessError:
print('Error')
return 0, 'error', 0, '', 0, '', 0, 0
if verbose:
print(self.ss_log)
# parse the results
lines = self.ss_log.split('\n')
if 'Supplied structure constraints create empty solution set for sequence' in self.ss_log:
return 0, 'Supplied structure constraints create empty solution set for sequence', 0, '', 0, '', 0, 0
#if not 'frequency of mfe structure' in self.ss_log:
# RNAfold -p -d2 --noLP -C < /var/folders/yc/ssr9692s5fzf7k165grnhpk80000gp/T/tmpGiUoo7.fa
# >rna_seq
# AAAUUAAGGGGAAGCGUUGAGCCGCUACCCAUAUGUGGUUCACUCGGAUAGCGGGGAGCUAAUAGUGAAACCGGCCCUUUAGGGG
# ...((((((((.(((......((((((.((....(((...)))..)).))))))...)))..............))))))))... (-19.80)
# ...{(((((((.(((......((((((.((....(((...)))..)).))))))...)))..............)))))))}... [-21.05]
#...((((((((.(((......((((((.((....(((...)))..)).))))))...)))..............))))))))... {-19.80 d=2.34}
# frequency of mfe structure in ensemble 0.131644; ensemble diversity 3.68
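# Layout of the RNAfold -p output parsed below (as in the example above):
# lines[0] is the FASTA header, lines[1] the sequence, lines[2] the MFE
# structure with its energy, lines[3] the pair-probability (ensemble) string
# with the ensemble free energy, lines[4] the centroid structure with its
# energy and distance d, lines[5] the MFE frequency and ensemble diversity.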
mfess = lines[2].split()[0]
mfe = ' '.join(lines[2].split()[-1:])
mfe = float(mfe.replace('(', '').replace(')', '')) # (-19.80) ->-19.80
efess = lines[3].split()[0] # pair-probability (ensemble) structure string
efe = ' '.join(lines[3].split()[-1:])
efe = float(efe.replace('[', '').replace(']', '')) # (-19.80) ->-19.80
cfess = lines[4].split()[0] # centroid structure
cfe, d = ' '.join(lines[4].split()[1:]).split('d')
cfe = float(cfe.replace('{', '').replace('}', '')) # (-19.80) ->-19.80
words = lines[5].split() # frequency of mfe structure & ensemble diversity line
freq = round(float(words[6].replace(';', '')), 2) # frequency of mfe structure in ensemble
diversity = float(words[9]) # ensemble diversity
if verbose:
print(mfe, mfess, efe, efess, cfe, cfess, freq, diversity)
return mfe, mfess, efe, efess, cfe, cfess, freq, diversity
elif method == "RNAfold":
cmd = 'RNAfold < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.strip().split('\n')[:])
elif method == "RNAsubopt" and constraints:
cmd = 'RNAsubopt -C < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.split('\n')[:])
elif method == "RNAsubopt":
cmd = 'RNAsubopt < ' + tf.name
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.split('\n')[:])
elif method == "mcfold":
# -F tope=1
if explore:
explore_str = " -F explore=" + str(explore)
else:
explore_str = ''
#if constraints:
#cmd = "curl -Y 0 -y 300 -F \"pass=lucy\" -F mask=\"" + constraints + "\" " + explore_str + \
#" -F sequence=\"" + self.seq + "\" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi"
cmd = "curl https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi\?pass\=lucy\&sequence\=" + self.seq + "\&top\=20\&explore\=15\&name\=\&mask\='" + constraints + "'\&singlehigh\=\&singlemed\=\&singlelow\="
# cmd = "curl -Y 0 -y 300 -F \"pass=lucy\" -F sequence=\"" + self.seq + "\" https://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi"
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = o.stdout.read().decode(errors='ignore').strip()
err = o.stderr.read().decode(errors='ignore').strip()
if verbose:
print(out)
# If no structure can be found, detect this statement and finish the routine.
if 'Explored 0 structures' in out:
return 0.00, '', 'Explored 0 structures'
comment = ''
energy = ''
out = out.split('\n')
for l in out :
# first find the best dynamic programming energy; in the next iterations it
# is used to search for the line that carries this energy and its secondary
# structure
# (((..))) -5.43
if energy: # if energy is set
if energy in l:
if verbose: print(l)
ss = l.split()[0]
# Performing Dynamic Programming...
# Best Dynamic Programming Solution has Energy: -5.43
if l.startswith('Best Dynamic Programming Solution has Energy:'):
energy_bdp = l.split(':')[1].strip()
if verbose:
print ('mcfold::energy best dynamics programming: ' + energy_bdp)
comment = 'energy best dynamics programming'
ss = constraints
# return float(energy), constraints # I'm not sure if this is good
# Ok, for whatever reason the best DP energy might not be exactly the same as
# the energy listed later for the secondary structure. So this code finds that
# secondary structure, gets the energy for it again,
# and overwrites the previous energy.
# In this case:
# Best Dynamic Programming Solution has Energy: -5.46
# ...
# CUCUCGAAAGAUG
# (((.((..))))) -5.44 ( +0.00)
# (((.((..))))) BP >= 50%
# if even this does not find an ss, set ss to an empty string so we don't crash;
# this can happen, for example, here:
# curl -Y 0 -y 300 -F "pass=lucy" -F mask="((******)))" -F sequence="CCUgcgcaAGG" \
# http://www.major.iric.ca/cgi-bin/MC-Fold/mcfold.static.cgi
ss = ''
for l in out:
if 'target="_blank">MARNA</a>-formatted:<P><P><P></H2><pre>' in l:
index = out.index(l)
ss_line = out[index + 2]
ss, energy = ss_line.split()[0:2] # '(((.((..))))) -5.44 ( +0.00)'
# if there is
# UUGCCGUAAGACA
# ............. BP >= 50%
# then finish with energy 0.00, and empty ss
if energy == 'BP':
energy = energy_bdp
comment = 'BP energy'
return energy_bdp, constraints, comment
# break
# prepare outputs: return values and self attributes
self.log = out
self.ss = ss
return float(energy), ss, comment
# if method == "RNAsubopt":
# from cogent.app.vienna_package import RNAfold, RNAsubopt
# r = RNAsubopt(WorkingDir="/tmp")
# res = r([self.seq])
# return str(res['StdOut'].read()).strip()
# if method == 'RNAfold':
# from cogent.app.vienna_package import RNAfold, RNAsubopt
# r = RNAfold(WorkingDir="/tmp")
# res = r([self.seq])
# self.ss_log = res['StdOut'].read()
# return self.ss_log.strip().split('\n')[-1].split()[0]
elif method == "ipknot":
self.ss_log = subprocess.check_output(IPKNOT_PATH + ' ' + tf.name, shell=True)
return '\n'.join(self.ss_log.decode().split('\n')[2:])
elif method == "contextfold":
if path:
CONTEXTFOLD_PATH = path
if not CONTEXTFOLD_PATH:
print('Set up CONTEXTFOLD_PATH in configuration.')
sys.exit(0)
cmd = "cd " + CONTEXTFOLD_PATH + \
" && java -cp bin contextFold.app.Predict in:" + self.seq
if verbose:
print(cmd)
self.ss_log = subprocess.check_output(cmd, shell=True).decode()
return '\n'.join(self.ss_log.split('\n')[1:])
elif method == "centroid_fold":
self.ss_log = subprocess.check_output('centroid_fold ' + tf.name, shell=True)
return '\n'.join(self.ss_log.split('\n')[2:])
elif method == "rnastructure":
cmd = RNASTRUCTURE_PATH + '/exe/Fold ' + tf.name + ' ' + tf.name + '.out '
if shapefn:
cmd += ' -sh ' + shapefn
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if stderr:
print(stderr)
cmd = RNASTRUCTURE_PATH + '/exe/ct2dot ' + tf.name + '.out 1 ' + \
tf.name + '.dot'
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if not stderr:
with open(tf.name + '.dot') as f:
return f.read().strip()
# (-51.15, '.(.(((((((((((((((..))))))))))))))))(..((((((((....)))).))))).')
elif method == "rnastructure_CycleFold":
cmd = RNASTRUCTURE_PATH + '/exe/CycleFold ' + tf.name + ' > ' + tf.name + '.ct '
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if stderr:
print(stderr)
# get energy
energy = float(open(tf.name + '.ct').readline().split("energy:")[1].strip()) # >rna_seq energy: -51.1500
# get ss in dot-bracket notation
cmd = RNASTRUCTURE_PATH + '/exe/ct2dot ' + tf.name + '.ct 1 ' + \
tf.name + '.dot'
if verbose:
print(cmd)
o = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = o.stderr.read().strip()
if not stderr:
with open(tf.name + '.dot') as f:
# (-51.15, '.(.(((((((((((((((..))))))))))))))))(..((((((((....)))).))))).')
return energy, f.read().strip().split('\n')[2]
else:
raise MethodNotChosen('You have to define a correct method to use.')
# main
def load_fasta_ss_into_RNAseqs(fn, debug=True):
seqs = []
with open(fn) as f:
for line in f:
if debug: print(line)
name = line.replace('>', '').strip()
seq = next(f).strip()
ss = next(f).strip()
rs = RNASequence(seq, ss, name)
seqs.append(rs)
return seqs
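# A minimal usage sketch (hypothetical file name; the file holds repeated
# name/sequence/structure triplets, one per line):
#
#   seqs = load_fasta_ss_into_RNAseqs('seqs.fa', debug=False)
#   print(seqs[0].name, seqs[0].seq, seqs[0].ss)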
if __name__ == '__main__':
import doctest
doctest.testmod()
seq = RNASequence("CGCUUCAUAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAGCCUUAAACUCUUGAUUAUGAAGUG")
seq.name = 'RNA01'
print(seq.predict_ss("RNAfold",
constraints="((((...............................................................))))")) # noqa
seq = RNASequence("CGCUUCAUAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAGCCUUAAACUCUUGAUUAUGAAGUG")
seq.name = 'RNA02'
print(seq.predict_ss("RNAsubopt",
constraints="((((...............................................................))))")) # noqa
print(seq.predict_ss("contextfold"))
print(seq.predict_ss(method="ipknot"))
verbose = False
seq = RNASequence("GGGGUUUUCCC")
print(seq.predict_ss("rnastructure", verbose=verbose))
print(seq.predict_ss("rnastructure", shapefn="data/shape.txt", verbose=verbose))
seq = RNASequence("CGUGGUUAGGGCCACGUUAAAUAGUUGCUUAAGCCCUAAGCGUUGAUAAAUAUCAGgUGCAA")
print(seq.predict_ss("rnastructure", shapefn="data/shape.txt", verbose=verbose))
#
# test of MethodNotChose
# print(seq.predict_ss("test"))
|
mit
|
Edu-Glez/Bank_sentiment_analysis
|
env/lib/python3.6/site-packages/IPython/lib/tests/test_latextools.py
|
8
|
3869
|
# encoding: utf-8
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import nose.tools as nt
from IPython.lib import latextools
from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib
from IPython.utils.process import FindCmdError
def test_latex_to_png_dvipng_fails_when_no_cmd():
"""
`latex_to_png_dvipng` should return None when a required command is not available
"""
for command in ['latex', 'dvipng']:
yield (check_latex_to_png_dvipng_fails_when_no_cmd, command)
def check_latex_to_png_dvipng_fails_when_no_cmd(command):
def mock_find_cmd(arg):
if arg == command:
raise FindCmdError
with patch.object(latextools, "find_cmd", mock_find_cmd):
nt.assert_equal(latextools.latex_to_png_dvipng("whatever", True),
None)
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_dvipng_runs():
"""
Test that latex_to_png_dvipng just runs without error.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return None
for (s, wrap) in [(u"$$x^2$$", False), (u"x^2", True)]:
yield (latextools.latex_to_png_dvipng, s, wrap)
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
yield (latextools.latex_to_png_dvipng, s, wrap)
@skipif_not_matplotlib
def test_latex_to_png_mpl_runs():
"""
Test that latex_to_png_mpl just runs without error.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return None
for (s, wrap) in [("$x^2$", False), ("x^2", True)]:
yield (latextools.latex_to_png_mpl, s, wrap)
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
yield (latextools.latex_to_png_mpl, s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
img = latextools.latex_to_html("$x^2$")
nt.assert_in("data:image/png;base64,iVBOR", img)
def test_genelatex_no_wrap():
"""
Test genelatex with wrap=False.
"""
def mock_kpsewhich(filename):
assert False, ("kpsewhich should not be called "
"(called with {0})".format(filename))
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equal(
'\n'.join(latextools.genelatex("body text", False)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}''')
def test_genelatex_wrap_with_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is installed.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return "path/to/breqn.sty"
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equal(
'\n'.join(latextools.genelatex("x^2", True)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}''')
def test_genelatex_wrap_without_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is not installed.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return None
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equal(
'\n'.join(latextools.genelatex("x^2", True)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}''')
|
apache-2.0
|
roxyboy/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
128
|
12853
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
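# For example, type_of_target(NotAnArray(np.array([1, 0, 2]))) is expected to
# be 'multiclass' (see the 'multiclass' examples and test_type_of_target below).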
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Borderline case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
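# y_sp encodes the same labels as y: column 1 stores an explicit zero for row 0
# and an implicit zero for row 4, and column 2 consists of implicit zeros only.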
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
hagne/atm-py
|
atmPy/aerosols/instruments/LAS/LAS.py
|
1
|
8196
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 10 11:43:10 2014
@author: htelg
"""
import datetime
import warnings
import numpy as np
import pandas as pd
import pylab as plt
from StringIO import StringIO as io
from scipy.interpolate import UnivariateSpline
from atmPy.aerosols.size_distribution import sizedistribution
def read_csv(fname):
las = _readFromFakeXLS(fname)
sd,hk = _separate_sizedist_and_housekeep(las)
bins = _get_bins(sd)
dist = sizedistribution.SizeDist_TS(sd, bins, "numberConcentration")
return dist
def _separate_sizedist_and_housekeep(las):
"""Beside separating size distribution and housekeeping this
function also converts the data to a numberconcentration (#/cc)
Parameters
----------
las: pandas.DataFrame"""
sd = las.copy()
hk = las.copy()
k = sd.keys()
where = np.argwhere(k == 'Flow sccm') + 1
khk = k[: where]
sd = sd.drop(khk, axis=1)
hsd = k[where:]
hk = hk.drop(hsd, axis=1)
hk['Sample sccm'] = hk['Sample sccm'].astype(float)
hk['Accum. Secs'] = hk['Accum. Secs'].astype(float)
# normalize to time and flow
sd = sd.mul(60./hk['Sample sccm'] / hk['Accum. Secs'], axis = 0 )
return sd,hk
def _get_bins(frame, log=False):
"""
get the bins from the column labels of the size distribution DataFrame.
"""
frame = frame.copy()
bins = np.zeros(frame.keys().shape[0]+1)
for e, i in enumerate(frame.keys()):
bin_s, bin_e = i.split(' ')
bin_s = float(bin_s)
bin_e = float(bin_e)
bins[e] = bin_s
bins[e+1] = bin_e
return bins #binCenters
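# Example: column labels '100.0 110.0' and '110.0 120.0' (contiguous bins)
# yield bins = [100., 110., 120.].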
def _readFromFakeXLS(fname):
"""reads and shapes a XLS file produced by the LAS instruments"""
fr = pd.read_csv(fname, sep='\t')
newcolname = [fr.columns[e] + ' ' + str(fr.values[0][e]) for e, i in enumerate(fr.columns)]
fr.columns = newcolname
fr = fr.drop(fr.index[0])
bla = pd.Series(fr['Date -'].values + ' ' + fr['Time -'].values)
fr.index = bla.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %I:%M:%S.%f %p'))
fr = fr.drop(['Date -', 'Time -'], axis=1)
return fr
# def _getBinCenters(frame, binedges=False, log=False):
# """
# LAS gives the bin edges, this calculates the bin centers.
# if log is True, the center will be with respect to the log10 ... log(d_{n+1})-log(d_{n})
# if binedges is True, frame is not really a frame but the binedges (array with dtype=float)
# Make sure you are running "removeHousekeeping" first
# """
# frame = frame.copy()
#
# if binedges:
# if log:
# binCenters = 10**((np.log10(frame[:-1]) + np.log10(frame[1:]))/2.)
# else:
#
# binCenters = (frame[:-1] + frame[1:])/2.
# else:
# binCenters = np.zeros(frame.keys().shape)
# for e, i in enumerate(frame.keys()):
# bin_s, bin_e = i.split(' ')
# bin_s = float(bin_s)
# bin_e = float(bin_e)
# normTo = bin_e - bin_s
# frame[i] = frame[i].divide(normTo)
# if log:
# binCenters[e] = 10**((np.log10(bin_e) + np.log10(bin_s))/2.)
# else:
# binCenters[e] = (bin_e + bin_s)/2.
# return binCenters
# def getTimeIntervalFromFrame(frame, start, end):
# """cutes out a particular time interval from frame.
# e.g.: getTimeIntervalFromFrame(frame,'2014-10-31 18:10:00','2014-10-31 18:10:00')"""
# frame = frame.copy()
# if start:
# frame = frame.truncate(before = start)
#
# if end:
# frame = frame.truncate(after = end)
#
# return frame
#
# def frame2singleDistribution(frame):
# frame = frame.copy()
# singleHist = np.zeros(frame.shape[1])
# for i in xrange(frame.shape[1]):
# singleHist[i] = np.nansum(frame.values[:,i])
# singleHist /= frame.shape[0]
# return singleHist
def _string2Dataframe(data):
sb = io(data)
dataFrame = pd.read_csv(sb, sep=' ', names=('d', 'amp')).sort('d')
return dataFrame
def read_Calibration_fromString(data):
'''
unit of diameter must be nm
data = """140 88
150 102
173 175
200 295
233 480
270 740
315 880
365 1130
420 1350
490 1930
570 3050
660 4200
770 5100
890 6300
1040 8000
1200 8300
1400 10000
1600 11500
1880 16000
2180 21000
2500 28000
3000 37000"""
'''
dataFrame = _string2Dataframe(data)
calibrationInstance = calibration(dataFrame)
return calibrationInstance
def save_Calibration(calibrationInstance, fname):
"""should be saved hier cd ~/data/POPS_calibrations/"""
calibrationInstance.data.to_csv(fname, index = False)
return
# def plot_distMap_LAS(fr_d,binEdgensLAS_d):
# binCenters = getBinCenters(binEdgensLAS_d , binedges= True, log = True)
# TIME_LAS,D_LAS,DNDP_LAS = frameToXYZ(fr_d, binCenters)
# f,a = plt.subplots()
# pcIm = a.pcolormesh(TIME_LAS,D_LAS,
# DNDP_LAS,
# norm = LogNorm(),#vmin = 3,vmax = distZoom.data.values.max()),#vmin = 1e-5),
# # cmap=plt.cm.RdYlBu_r,
# # cmap = plt.cm.terrain_r,
# cmap = hm.get_colorMap_intensity(),#plt.cm.hot_r, #PuBuGn,
# # shading='gouraud',
# )
# a.semilogy()
# a.set_ylim((150,2500))
# a.set_ylabel('Diameter (nm)')
# a.set_xlabel('Time')
# a.set_title('LAS')
# cb = f.colorbar(pcIm)
# cb.set_label("Particle number (cm$^{-3}\,$s$^{-1}$)")
# f.autofmt_xdate()
# # a.yaxis.set_minor_formatter(FormatStrFormatter("%i"))
# # a.yaxis.set_major_formatter(FormatStrFormatter("%i"))
class calibration:
def __init__(self,dataTabel):
self.data = dataTabel
self.calibrationFunction = self.get_calibrationFunctionSpline()
def save_csv(self,fname):
save_Calibration(self,fname)
return
def get_calibrationFunctionSpline(self, fitOrder=1):
"""
Performs a spline fit/smoothing (scipy.interpolate.UnivariateSpline) of d over amp (yes, this way, not the other way around).
Returns (generates): a function (stored as self.calibrationFunction) which can later be used to calculate d from amp.
Optional Parameters:
\t fitOrder: int - smoothing factor (s) passed to the spline fit
\t noOfPts: int - length of generated graph
\t plot: boolean - if result is supposed to be plotted
"""
# The following two-step method is necessary to get a smooth curve.
# When I only do the second step on the cal_curve I get some weird wiggles
##### First Step
if (self.data.amp.values[1:]-self.data.amp.values[:-1]).min() < 0:
warnings.warn('The data represent a non-injective function! This will not work. Plot the calibration to see what I mean')
sf = UnivariateSpline(self.data.d.values, self.data.amp.values, s=fitOrder)
d = np.logspace(np.log10(self.data.d.values.min()), np.log10(self.data.d.values.max()), 500)
amp = sf(d)
# second step
cal_function = UnivariateSpline(amp, d, s=fitOrder)
return cal_function
def plot_calibration(self):
"""Plots the calibration function and data
Arguments
------------
cal: calibration instance
Returns
------------
figure
axes
calibration data graph
calibration function graph
"""
cal_function = self.calibrationFunction
amp = np.logspace(np.log10(self.data.amp.min()), np.log10(self.data.amp.max()), 500)
d = cal_function(amp)
f, a = plt.subplots()
cal_data, = a.plot(self.data.d, self.data.amp, 'o', label='data',)
cal_func, = a.plot(d, amp, label='function')
a.loglog()
a.set_xlim(0.9*self.data.d.min(), 1.1*self.data.d.max())
a.set_xlabel('Diameter (nm)')
a.set_ylim(0.9*self.data.amp.min(), 1.1*self.data.amp.max())
a.set_ylabel('Amplitude (digitizer bins)')
a.set_title('Calibration curve')
a.legend(loc = 2)
return f, a, cal_data, cal_func
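# A minimal usage sketch (hypothetical amplitude values; `data` as in the
# read_Calibration_fromString docstring above):
#
#   cal = read_Calibration_fromString(data)
#   diameters = cal.calibrationFunction(np.array([100., 1000., 10000.]))
#   f, a, cal_data, cal_func = cal.plot_calibration()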
|
mit
|
rgommers/scipy
|
scipy/stats/_entropy.py
|
12
|
11491
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 09:06:05 2021
@author: matth
"""
from __future__ import annotations
import math
import numpy as np
from scipy import special
from typing import Optional, Union
__all__ = ['entropy', 'differential_entropy']
def entropy(pk, qk=None, base=None, axis=0):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=axis)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=axis)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
axis: int, optional
The axis along which the entropy is calculated. Default is 0.
Returns
-------
S : float
The calculated entropy.
Examples
--------
>>> from scipy.stats import entropy
Bernoulli trial with different p.
The outcome of a fair coin is the most uncertain:
>>> entropy([1/2, 1/2], base=2)
1.0
The outcome of a biased coin is less uncertain:
>>> entropy([9/10, 1/10], base=2)
0.46899559358928117
Relative entropy:
>>> entropy([1/2, 1/2], qk=[9/10, 1/10])
0.5108256237659907
"""
if base is not None and base <= 0:
raise ValueError("`base` must be a positive number or `None`.")
pk = np.asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)
if qk is None:
vec = special.entr(pk)
else:
qk = np.asarray(qk)
pk, qk = np.broadcast_arrays(pk, qk)
qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)
vec = special.rel_entr(pk, qk)
S = np.sum(vec, axis=axis)
if base is not None:
S /= np.log(base)
return S
def differential_entropy(
values: np.typing.ArrayLike,
*,
window_length: Optional[int] = None,
base: Optional[float] = None,
axis: int = 0,
method: str = "auto",
) -> Union[np.number, np.ndarray]:
r"""Given a sample of a distribution, estimate the differential entropy.
Several estimation methods are available using the `method` parameter. By
default, a method is selected based on the size of the sample.
Parameters
----------
values : sequence
Sample from a continuous distribution.
window_length : int, optional
Window length for computing Vasicek estimate. Must be an integer
between 1 and half of the sample size. If ``None`` (the default), it
uses the heuristic value
.. math::
\left \lfloor \sqrt{n} + 0.5 \right \rfloor
where :math:`n` is the sample size. This heuristic was originally
proposed in [2]_ and has become common in the literature.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
axis : int, optional
The axis along which the differential entropy is calculated.
Default is 0.
method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional
The method used to estimate the differential entropy from the sample.
Default is ``'auto'``. See Notes for more information.
Returns
-------
entropy : float
The calculated differential entropy.
Notes
-----
This function will converge to the true differential entropy in the limit
.. math::
n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0
The optimal choice of ``window_length`` for a given sample size depends on
the (unknown) distribution. Typically, the smoother the density of the
distribution, the larger the optimal value of ``window_length`` [1]_.
The following options are available for the `method` parameter.
* ``'vasicek'`` uses the estimator presented in [1]_. This is
one of the first and most influential estimators of differential entropy.
* ``'van es'`` uses the bias-corrected estimator presented in [3]_, which
is not only consistent but, under some conditions, asymptotically normal.
* ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown
in simulation to have smaller bias and mean squared error than
the Vasicek estimator.
* ``'correa'`` uses the estimator presented in [5]_ based on local linear
regression. In a simulation study, it had consistently smaller mean
square error than the Vasicek estimator, but it is more expensive to
compute.
* ``'auto'`` selects the method automatically (default). Currently,
this selects ``'van es'`` for very small samples (<10), ``'ebrahimi'``
for moderate sample sizes (11-1000), and ``'vasicek'`` for larger
samples, but this behavior is subject to change in future versions.
All estimators are implemented as described in [6]_.
References
----------
.. [1] Vasicek, O. (1976). A test for normality based on sample entropy.
Journal of the Royal Statistical Society:
Series B (Methodological), 38(1), 54-59.
.. [2] Grzegorzewski, P., & Wieczorkowski, R. (1999). Entropy-based
goodness-of-fit test for exponentiality. Communications in
Statistics-Theory and Methods, 28(5), 1183-1202.
.. [3] Van Es, B. (1992). Estimating functionals related to a density by a
class of statistics based on spacings. Scandinavian Journal of
Statistics, 61-72.
.. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures
of sample entropy. Statistics & Probability Letters, 20(3), 225-234.
.. [5] Correa, J. C. (1995). A new estimator of entropy. Communications
in Statistics-Theory and Methods, 24(10), 2439-2449.
.. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods.
Annals of Data Science, 2(2), 231-241.
https://link.springer.com/article/10.1007/s40745-015-0045-9
Examples
--------
>>> from scipy.stats import differential_entropy, norm
Entropy of a standard normal distribution:
>>> rng = np.random.default_rng()
>>> values = rng.standard_normal(100)
>>> differential_entropy(values)
1.3407817436640392
Compare with the true entropy:
>>> float(norm.entropy())
1.4189385332046727
For several sample sizes between 5 and 1000, compare the accuracy of
the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically,
compare the root mean squared error (over 1000 trials) between the estimate
and the true differential entropy of the distribution.
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>>
>>>
>>> def rmse(res, expected):
... '''Root mean squared error'''
... return np.sqrt(np.mean((res - expected)**2))
>>>
>>>
>>> a, b = np.log10(5), np.log10(1000)
>>> ns = np.round(np.logspace(a, b, 10)).astype(int)
>>> reps = 1000 # number of repetitions for each sample size
>>> expected = stats.expon.entropy()
>>>
>>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []}
>>> for method in method_errors:
... for n in ns:
... rvs = stats.expon.rvs(size=(reps, n), random_state=rng)
... res = stats.differential_entropy(rvs, method=method, axis=-1)
... error = rmse(res, expected)
... method_errors[method].append(error)
>>>
>>> for method, errors in method_errors.items():
... plt.loglog(ns, errors, label=method)
>>>
>>> plt.legend()
>>> plt.xlabel('sample size')
>>> plt.ylabel('RMSE (1000 trials)')
>>> plt.title('Entropy Estimator Error (Exponential Distribution)')
"""
values = np.asarray(values)
values = np.moveaxis(values, axis, -1)
n = values.shape[-1] # number of observations
if window_length is None:
window_length = math.floor(math.sqrt(n) + 0.5)
if not 2 <= 2 * window_length < n:
raise ValueError(
f"Window length ({window_length}) must be positive and less "
f"than half the sample size ({n}).",
)
if base is not None and base <= 0:
raise ValueError("`base` must be a positive number or `None`.")
sorted_data = np.sort(values, axis=-1)
methods = {"vasicek": _vasicek_entropy,
"van es": _van_es_entropy,
"correa": _correa_entropy,
"ebrahimi": _ebrahimi_entropy,
"auto": _vasicek_entropy}
method = method.lower()
if method not in methods:
message = f"`method` must be one of {set(methods)}"
raise ValueError(message)
if method == "auto":
if n <= 10:
method = 'van es'
elif n <= 1000:
method = 'ebrahimi'
else:
method = 'vasicek'
res = methods[method](sorted_data, window_length)
if base is not None:
res /= np.log(base)
return res
def _pad_along_last_axis(X, m):
"""Pad the data for computing the rolling window difference."""
# scales a bit better than method in _vasicek_like_entropy
shape = np.array(X.shape)
shape[-1] = m
Xl = np.broadcast_to(X[..., [0]], shape) # [0] vs 0 to maintain shape
Xr = np.broadcast_to(X[..., [-1]], shape)
return np.concatenate((Xl, X, Xr), axis=-1)
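# Example: _pad_along_last_axis(np.array([1., 2., 3.]), 2) gives
# array([1., 1., 1., 2., 3., 3., 3.]) - the edge values are repeated m times.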
def _vasicek_entropy(X, m):
"""Compute the Vasicek estimator as described in [6] Eq. 1.3."""
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
logs = np.log(n/(2*m) * differences)
return np.mean(logs, axis=-1)
def _van_es_entropy(X, m):
"""Compute the van Es estimator as described in [6]."""
# No equation number, but referred to as HVE_mn.
# Typo: there should be a log within the summation.
n = X.shape[-1]
difference = X[..., m:] - X[..., :-m]
term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1)
k = np.arange(m, n+1)
return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
def _ebrahimi_entropy(X, m):
"""Compute the Ebrahimi estimator as described in [6]."""
# No equation number, but referred to as HE_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
i = np.arange(1, n+1).astype(float)
ci = np.ones_like(i)*2
ci[i <= m] = 1 + (i[i <= m] - 1)/m
ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m
logs = np.log(n * differences / (ci * m))
return np.mean(logs, axis=-1)
def _correa_entropy(X, m):
"""Compute the Correa estimator as described in [6]."""
# No equation number, but referred to as HC_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
i = np.arange(1, n+1)
dj = np.arange(-m, m+1)[:, None]
j = i + dj
j0 = j + m - 1 # 0-indexed version of j
Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
difference = X[..., j0] - Xibar
num = np.sum(difference*dj, axis=-2) # dj is d-i
den = n*np.sum(difference**2, axis=-2)
return -np.mean(np.log(num/den), axis=-1)
|
bsd-3-clause
|
derekjchow/models
|
research/cognitive_mapping_and_planning/scripts/script_env_vis.py
|
5
|
6042
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple python function to walk in the enviornments that we have created.
PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_env_vis.py \
--dataset_name sbpd --building_name area3
"""
import sys
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from PIL import ImageTk, Image
import Tkinter as tk
import logging
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
import datasets.nav_env_config as nec
import datasets.nav_env as nav_env
import cv2
from datasets import factory
import render.swiftshader_renderer as renderer
SwiftshaderRenderer = renderer.SwiftshaderRenderer
VisualNavigationEnv = nav_env.VisualNavigationEnv
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset_name', 'sbpd', 'Name of the dataset.')
flags.DEFINE_float('fov', 60., 'Field of view')
flags.DEFINE_integer('image_size', 512, 'Size of the image.')
flags.DEFINE_string('building_name', '', 'Name of the building.')
def get_args():
navtask = nec.nav_env_base_config()
navtask.task_params.type = 'rng_rejection_sampling_many'
navtask.task_params.rejection_sampling_M = 2000
navtask.task_params.min_dist = 10
sz = FLAGS.image_size
navtask.camera_param.fov = FLAGS.fov
navtask.camera_param.height = sz
navtask.camera_param.width = sz
navtask.task_params.img_height = sz
navtask.task_params.img_width = sz
# navtask.task_params.semantic_task.class_map_names = ['chair', 'door', 'table']
# navtask.task_params.type = 'to_nearest_obj_acc'
logging.info('navtask: %s', navtask)
return navtask
def load_building(dataset_name, building_name):
dataset = factory.get_dataset(dataset_name)
navtask = get_args()
cp = navtask.camera_param
rgb_shader, d_shader = renderer.get_shaders(cp.modalities)
r_obj = SwiftshaderRenderer()
r_obj.init_display(width=cp.width, height=cp.height,
fov=cp.fov, z_near=cp.z_near, z_far=cp.z_far,
rgb_shader=rgb_shader, d_shader=d_shader)
r_obj.clear_scene()
b = VisualNavigationEnv(robot=navtask.robot, env=navtask.env,
task_params=navtask.task_params,
building_name=building_name, flip=False,
logdir=None, building_loader=dataset,
r_obj=r_obj)
b.load_building_into_scene()
b.set_building_visibility(False)
return b
def walk_through(b):
# init agent at a random location in the environment.
init_env_state = b.reset([np.random.RandomState(0), np.random.RandomState(0)])
global current_node
rng = np.random.RandomState(0)
current_node = rng.choice(b.task.nodes.shape[0])
root = tk.Tk()
image = b.render_nodes(b.task.nodes[[current_node],:])[0]
print(image.shape)
image = image.astype(np.uint8)
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
panel = tk.Label(root, image=im)
map_size = b.traversible.shape
sc = np.max(map_size)/256.
loc = np.array([[map_size[1]/2., map_size[0]/2.]])
x_axis = np.zeros_like(loc); x_axis[:,1] = sc
y_axis = np.zeros_like(loc); y_axis[:,0] = -sc
cum_fs, cum_valid = nav_env.get_map_to_predict(loc, x_axis, y_axis,
map=b.traversible*1.,
map_size=256)
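# cum_fs is an overhead view of the traversible map: a 256-pixel crop
# (map_size=256) centred on the map midpoint, with axes scaled via sc so the
# larger map dimension fits; the JET colormap below is applied purely for display.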
cum_fs = cum_fs[0]
cum_fs = cv2.applyColorMap((cum_fs*255).astype(np.uint8), cv2.COLORMAP_JET)
im = Image.fromarray(cum_fs)
im = ImageTk.PhotoImage(im)
panel_overhead = tk.Label(root, image=im)
def refresh():
global current_node
image = b.render_nodes(b.task.nodes[[current_node],:])[0]
image = image.astype(np.uint8)
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
panel.configure(image=im)
panel.image = im
def left_key(event):
global current_node
current_node = b.take_action([current_node], [2], 1)[0][0]
refresh()
def up_key(event):
global current_node
current_node = b.take_action([current_node], [3], 1)[0][0]
refresh()
def right_key(event):
global current_node
current_node = b.take_action([current_node], [1], 1)[0][0]
refresh()
def quit(event):
root.destroy()
panel_overhead.grid(row=4, column=5, rowspan=1, columnspan=1,
sticky=tk.W+tk.E+tk.N+tk.S)
panel.bind('<Left>', left_key)
panel.bind('<Up>', up_key)
panel.bind('<Right>', right_key)
panel.bind('q', quit)
panel.focus_set()
panel.grid(row=0, column=0, rowspan=5, columnspan=5,
sticky=tk.W+tk.E+tk.N+tk.S)
root.mainloop()
def simple_window():
root = tk.Tk()
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 0] = 255
im = Image.fromarray(image)
im = ImageTk.PhotoImage(im)
image = np.zeros((128, 128, 3), dtype=np.uint8)
image[32:96, 32:96, 1] = 255
im2 = Image.fromarray(image)
im2 = ImageTk.PhotoImage(im2)
panel = tk.Label(root, image=im)
def left_key(event):
panel.configure(image=im2)
panel.image = im2
def quit(event):
sys.exit()
panel.bind('<Left>', left_key)
panel.bind('<Up>', left_key)
panel.bind('<Down>', left_key)
panel.bind('q', quit)
panel.focus_set()
panel.pack(side = "bottom", fill = "both", expand = "yes")
root.mainloop()
def main(_):
b = load_building(FLAGS.dataset_name, FLAGS.building_name)
walk_through(b)
if __name__ == '__main__':
app.run()
|
apache-2.0
|
jldbc/pybaseball
|
pybaseball/lahman.py
|
1
|
3787
|
from io import BytesIO
from os import path
from typing import Optional
from zipfile import ZipFile
import pandas as pd
import requests
from . import cache
url = "https://github.com/chadwickbureau/baseballdatabank/archive/master.zip"
base_string = "baseballdatabank-master/core"
_handle = None
def get_lahman_zip() -> Optional[ZipFile]:
# Retrieve the Lahman database zip file; returns None if the data already exists in the cache directory.
# If we already have the zip file, keep re-using that.
# Making this a function since everything else will be re-using these lines
global _handle
if path.exists(path.join(cache.config.cache_directory, base_string)):
_handle = None
elif not _handle:
s = requests.get(url, stream=True)
_handle = ZipFile(BytesIO(s.content))
return _handle
def download_lahman():
# download the entire Lahman db to the cache directory
z = get_lahman_zip()
if z is not None:
z.extractall(cache.config.cache_directory)
z = get_lahman_zip()
# this way we'll now start using the extracted zip directory
# instead of the session ZipFile object
def _get_file(tablename: str, quotechar: str = "'") -> pd.DataFrame:
z = get_lahman_zip()
f = f'{base_string}/{tablename}'
data = pd.read_csv(
f"{path.join(cache.config.cache_directory, f)}" if z is None else z.open(f),
header=0,
sep=',',
quotechar=quotechar
)
return data
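# e.g. _get_file('Teams.csv') reads baseballdatabank-master/core/Teams.csv from
# the cache directory, or directly from the downloaded zip when the archive has
# not been extracted yet.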
# do this for every table in the lahman db so they can exist as separate functions
def parks() -> pd.DataFrame:
return _get_file('Parks.csv')
def all_star_full() -> pd.DataFrame:
return _get_file("AllstarFull.csv")
def appearances() -> pd.DataFrame:
return _get_file("Appearances.csv")
def awards_managers() -> pd.DataFrame:
return _get_file("AwardsManagers.csv")
def awards_players() -> pd.DataFrame:
return _get_file("AwardsPlayers.csv")
def awards_share_managers() -> pd.DataFrame:
return _get_file("AwardsShareManagers.csv")
def awards_share_players() -> pd.DataFrame:
return _get_file("AwardsSharePlayers.csv")
def batting() -> pd.DataFrame:
return _get_file("Batting.csv")
def batting_post() -> pd.DataFrame:
return _get_file("BattingPost.csv")
def college_playing() -> pd.DataFrame:
return _get_file("CollegePlaying.csv")
def fielding() -> pd.DataFrame:
return _get_file("Fielding.csv")
def fielding_of() -> pd.DataFrame:
return _get_file("FieldingOF.csv")
def fielding_of_split() -> pd.DataFrame:
return _get_file("FieldingOFsplit.csv")
def fielding_post() -> pd.DataFrame:
return _get_file("FieldingPost.csv")
def hall_of_fame() -> pd.DataFrame:
return _get_file("HallOfFame.csv")
def home_games() -> pd.DataFrame:
return _get_file("HomeGames.csv")
def managers() -> pd.DataFrame:
return _get_file("Managers.csv")
def managers_half() -> pd.DataFrame:
return _get_file("ManagersHalf.csv")
def master() -> pd.DataFrame:
# Alias for people -- the new name for master
return people()
def people() -> pd.DataFrame:
return _get_file("People.csv")
def pitching() -> pd.DataFrame:
return _get_file("Pitching.csv")
def pitching_post() -> pd.DataFrame:
return _get_file("PitchingPost.csv")
def salaries() -> pd.DataFrame:
return _get_file("Salaries.csv")
def schools() -> pd.DataFrame:
return _get_file("Schools.csv", quotechar='"') # different here bc of doublequotes used in some school names
def series_post() -> pd.DataFrame:
return _get_file("SeriesPost.csv")
def teams() -> pd.DataFrame:
return _get_file("Teams.csv")
def teams_franchises() -> pd.DataFrame:
return _get_file("TeamsFranchises.csv")
def teams_half() -> pd.DataFrame:
return _get_file("TeamsHalf.csv")
|
mit
|
SigridK/corpkit
|
corpkit/conc.py
|
1
|
13245
|
def conc(corpus,
query,
option = 'tregex',
dep_function = 'any',
dep_type = 'basic-dependencies',
n = 100,
random = False,
window = 100,
trees = False,
plaintext = False, #'guess',
add_links = False,
show_links = False,
print_status = True,
print_output = True,
just_speakers = False,
root = False,
**kwargs):
"""A concordancer for Tregex queries and dependencies"""
import corpkit
import os
import re
import pandas as pd
from pandas import DataFrame
from time import localtime, strftime
try:
from IPython.display import display, clear_output
except ImportError:
pass
from corpkit.other import tregex_engine
from corpkit.tests import check_pytex, check_dit
try:
get_ipython().getoutput()
except TypeError:
have_ipython = True
except NameError:
import subprocess
have_ipython = False
if query == 'any':
query = r'.*'
# convert list to query
if type(query) == list:
from other import as_regex
if option.startswith('t'):
query = r'/%s/ !< __' % as_regex(query, boundaries = 'line')
else:
query = as_regex(query, boundaries = 'w')
can_do_fast = False
if option.startswith('t'):
if just_speakers is False:
can_do_fast = True
just_speakers_is_list = False
if type(just_speakers) == list:
just_speakers_is_list = True
if type(just_speakers) == str:
if just_speakers.lower() != 'all':
just_speakers = [just_speakers]
def get_deps(sentence, dep_type):
if dep_type == 'basic-dependencies':
return sentence.basic_dependencies
if dep_type == 'collapsed-dependencies':
return sentence.collapsed_dependencies
if dep_type == 'collapsed-ccprocessed-dependencies':
return sentence.collapsed_ccprocessed_dependencies
conc_lines = []
if option.startswith('t'):
if trees:
options = '-s'
else:
options = '-t'
if can_do_fast:
speakr = ''
tregex_engine(query = query, check_query = True, root = root)
wholes = tregex_engine(query = query,
options = ['-o', '-w', '-f', options],
corpus = corpus,
preserve_case = True,
root = root)
middle_column_result = tregex_engine(query = query,
options = ['-o', options],
corpus = corpus,
preserve_case = True,
root = root)
for (f, whole), mid in zip(wholes, middle_column_result):
reg = re.compile(r'(' + re.escape(mid) + r')', re.IGNORECASE)
start, middle, end = re.split(reg, whole, 1)
conc_lines.append([os.path.basename(f), speakr, start, middle, end])
else:
fs_to_conc = []
for r, dirs, fs in os.walk(corpus):
for f in fs:
if not os.path.isfile(os.path.join(r, f)):
continue
if not f.endswith('.txt') and not f.endswith('.xml'):
continue
fs_to_conc.append(os.path.join(r, f))
def normalise(concline):
import re
reg = re.compile(r'\([^ ]+')
spaces = re.compile(r'\s+')
concline = re.sub(reg, '', concline)
concline = re.sub(spaces, ' ', concline)
concline = concline.replace(')', '').replace(' ', ' ')
return concline.strip()
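            # Worked example (illustrative): normalise('(ROOT (NP (DT the) (NN dog)))')
            # returns 'the dog' -- bracket labels and parentheses are stripped.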
num_fs = len(fs_to_conc)
for index, filepath in enumerate(fs_to_conc):
f = os.path.basename(filepath)
if num_fs > 1:
if 'note' in kwargs.keys():
kwargs['note'].progvar.set((index) * 100.0 / num_fs)
from time import localtime, strftime
thetime = strftime("%H:%M:%S", localtime())
print '%s: Extracting data from %s ...' % (thetime, f)
if root:
root.update()
with open(filepath, "rb") as text:
parsetreedict = {}
data = text.read()
if option.startswith('p') or option.startswith('l'):
                    if option.startswith('l'):
                        import pickle
                        with open(filepath, 'rb') as pf:
                            lstokens = pickle.load(pf)
                        data = ' '.join(lstokens)
                        data = data.split(' . ')
                        lines = data
                    else:
                        lines = data.splitlines()
for l in lines:
m = re.compile(r'^(.*?)(' + query + r')(.*)$', re.IGNORECASE)
mat = re.search(m, l)
if mat:
conc_lines.append([f, '', mat.group(1), mat.group(2), mat.group(3)])
continue
from corenlp_xml.document import Document
corenlp_xml = Document(data)
#corenlp_xml = Beautifulcorenlp_xml(data, parse_only=justsents)
if just_speakers:
for s in just_speakers:
parsetreedict[s] = []
sents = [s for s in corenlp_xml.sentences if s.speakername in just_speakers]
#sents = [s for s in corenlp_xml.find_all('sentence') \
#if s.speakername.text.strip() in just_speakers]
else:
sents = corenlp_xml.sentences
nsents = len(sents)
for i, s in enumerate(sents):
if num_fs == 1:
if 'note' in kwargs.keys():
kwargs['note'].progvar.set((index) * 100.0 / nsents)
if root:
root.update()
try:
speakr = s.speakername.strip()
except:
speakr = ''
parsetree = s.parse_string
if option.startswith('t'):
parsetreedict[speakr].append(parsetree)
continue
elif option.startswith('d'):
#right_dependency_grammar = s.find_all('dependencies', type=dep_type, limit = 1)
deps = get_deps(s, dep_type)
if dep_function == 'any' or dep_function is False:
wdsmatching = [l.dependent.text.strip() for l in deps.links \
if re.match(query, l.dependent.text.strip())]
else:
comped = re.compile(dep_function, re.IGNORECASE)
#goodsent = any(re.match(query, l.dependent.text.strip()) for l in deps.links if re.match(comped, l.type.strip()))
wdsmatching = [l.dependent.text.strip() for l in deps.links \
if re.match(comped, l.type.strip()) and \
re.match(query, l.dependent.text.strip())]
                        # TODO: this needs proper indexing of matches
for wd in wdsmatching:
line = normalise(parsetree)
start, middle, end = re.split(r'(' + wd + r')', line, 1)
conc_lines.append([f, speakr, start, middle, end])
if option.startswith('t'):
for speakr, dt in parsetreedict.items():
trees_as_string = '\n'.join(dt)
if trees:
options = '-s'
else:
options = '-t'
with open('tmp.txt', 'w') as fo:
fo.write(trees_as_string.encode('utf-8', errors = 'ignore'))
tregex_engine(query = query, check_query = True, root = root)
wholes = tregex_engine(query = query,
options = ['-o', '-w', options],
corpus = 'tmp.txt',
preserve_case = True,
root = root)
middle_column_result = tregex_engine(query = query,
options = ['-o', options],
corpus = 'tmp.txt',
preserve_case = True,
root = root)
for whole, mid in zip(wholes, middle_column_result):
reg = re.compile(r'(' + re.escape(mid) + r')', re.IGNORECASE)
start, middle, end = re.split(reg, whole, 1)
conc_lines.append([f, speakr, start, middle, end])
# does not keep results ordered!
try:
os.remove('tmp.txt')
except:
pass
unique_results = [list(x) for x in set(tuple(x) for x in conc_lines)]
#make into series
series = []
pindex = 'f s l m r'.encode('utf-8').split()
    def _fix_spacing(s):
        # undo tokenisation spacing artefacts ("do n't" -> "don't", " ." -> ".", etc.)
        for old, new in [('$ ', '$'), ('`` ', '``'), (' ,', ','), (' .', '.'),
                         ("'' ", "''"), (" n't", "n't"), (" 're", "'re"), (" 'm", "'m"),
                         (" 's", "'s"), (" 'd", "'d"), (" 'll", "'ll"), ('  ', ' ')]:
            s = s.replace(old, new)
        return s
    for fname, spkr, start, word, end in unique_results:
        fname = os.path.basename(fname)
        start, word, end = _fix_spacing(start), _fix_spacing(word), _fix_spacing(end)
#spaces = ' ' * (maximum / 2 - (len(word) / 2))
#new_word = spaces + word + spaces
series.append(pd.Series([fname.encode('utf-8', errors = 'ignore'), \
spkr.encode('utf-8', errors = 'ignore'), \
start.encode('utf-8', errors = 'ignore'), \
word.encode('utf-8', errors = 'ignore'), \
end.encode('utf-8', errors = 'ignore')], index = pindex))
# randomise results...
if random:
from random import shuffle
shuffle(series)
if series == []:
if root:
print 'No results found, sorry.'
return
else:
raise ValueError("No results found, I'm afraid. Check your query and path.")
df = pd.concat(series, axis = 1).T
if not add_links:
df.columns = ['f', 's', 'l', 'm', 'r']
else:
df.columns = ['f', 's', 'l', 'm', 'r', 'link']
if all(x == '' for x in list(df['s'].values)):
df.drop('s', axis = 1, inplace = True)
formatl = lambda x: "{0}".format(x[-window:])
formatf = lambda x: "{0}".format(x[-20:])
#formatr = lambda x:
formatr = lambda x: "{{:<{}s}}".format(df['r'].str.len().max()).format(x[:window])
st = df.head(n).to_string(header = False, formatters={'l': formatl,
'r': formatr,
'f': formatf}).splitlines()
    # hack: strip the trailing '...' that the formatter leaves on truncated lines
rem = '\n'.join([re.sub('\s*\.\.\.\s*$', '', s) for s in st])
if print_output:
print rem
if 'note' in kwargs.keys():
kwargs['note'].progvar.set(100)
return df
if add_links:
def _add_links(lines, links = False, show = 'thread'):
link = "http://www.healthboards.com/boards/bipolar-disorder/695089-labels.html"
linktext = '<a href="%s>link</a>' % link
import pandas as pd
inds = list(df.index)
num_objects = len(list(df.index))
ser = pd.Series([link for n in range(num_objects)], index = inds)
lines['link'] = ser
return lines
df = _add_links(df)
if add_links:
if not show_links:
if print_output:
                print df.drop('link', axis = 1).head(n).to_string(header = False, formatters={'r': '{{:<{}s}}'.format(df['r'].str.len().max()).format})
else:
if print_output:
print HTML(df.to_html(escape=False))
else:
if print_output:
            print df.head(n).to_string(header = False, formatters={'r': '{{:<{}s}}'.format(df['r'].str.len().max()).format})
if not add_links:
df.columns = ['f', 'l', 'm', 'r']
else:
df.columns = ['f', 'l', 'm', 'r', 'link']
return df
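
# Hedged usage sketch (the corpus path and Tregex query below are placeholders,
# not values shipped with corpkit):
#
#     lines = conc('data/parsed-corpus', r'/NN.?/ >># NP',
#                  window=50, n=25, random=True)
#     # `lines` is a pandas DataFrame with columns f, (s,) l, m, r:
#     # filename, optional speaker, left context, match and right context.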
|
mit
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_hole.py
|
1
|
10084
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function, absolute_import
from six.moves import range
import MDAnalysis
import MDAnalysis.analysis.hole
from MDAnalysis.analysis.hole import HOLEtraj, HOLE
from numpy.testing import (TestCase, dec,
assert_equal, assert_almost_equal,
assert_array_equal,
assert_array_almost_equal, assert_)
import numpy as np
import nose
from nose.plugins.attrib import attr
import os
import errno
from MDAnalysisTests.datafiles import PDB_HOLE, MULTIPDB_HOLE
from MDAnalysisTests import (executable_not_found, module_not_found,
tempdir, in_dir)
def rlimits_missing():
    # return True if the resource module is not accessible (i.e. rlimits cannot be set)
try:
# on Unix we can manipulate our limits: http://docs.python.org/2/library/resource.html
import resource
soft_max_open_files, hard_max_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
return True
return False
class TestHOLE(TestCase):
filename = PDB_HOLE
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def setUp(self):
# keep tempdir around for the whole lifetime of the class
self.tempdir = tempdir.TempDir()
with in_dir(self.tempdir.name):
H = HOLE(self.filename, raseed=31415)
H.run()
H.collect()
self.H = H
def tearDown(self):
del self.H
del self.tempdir
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_HOLE(self):
profiles = self.H.profiles.values()
assert_equal(len(profiles), 1,
err_msg="HOLE.profile should contain exactly 1 profile")
p = profiles[0]
assert_equal(len(p), 425,
err_msg="wrong number of points in HOLE profile")
assert_almost_equal(p.rxncoord.mean(), -1.41225,
err_msg="wrong mean HOLE rxncoord")
assert_almost_equal(p.radius.min(), 1.19707,
err_msg="wrong min HOLE radius")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_vmd_surface(self):
with in_dir(self.tempdir.name):
filename = self.H.create_vmd_surface(filename="hole.vmd")
assert_equal(len(open(filename).readlines()), 6504,
err_msg="HOLE VMD surface file is incomplete")
class TestHOLEtraj(TestCase):
filename = MULTIPDB_HOLE
start = 5
stop = 7
    # HOLE is slow, so we run it only once and keep the result on the class;
    # note that you must not modify universe.trajectory (e.g. by iterating over it)
    # because that is not safe when tests run in parallel
@classmethod
def setUpClass(cls):
cls.universe = MDAnalysis.Universe(cls.filename)
if not executable_not_found("hole"):
with tempdir.in_tempdir():
H = HOLEtraj(cls.universe, start=cls.start,
stop=cls.stop, raseed=31415)
H.run()
cls.H = H
else:
cls.H = None
cls.frames = [ts.frame
for ts in cls.universe.trajectory[cls.start:cls.stop]]
@classmethod
def tearDownClass(cls):
del cls.H
del cls.universe
# This is VERY slow on 11 frames so we just take 2
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_HOLEtraj(self):
assert_array_equal(sorted(self.H.profiles.keys()), self.frames,
err_msg="H.profiles.keys() should contain the frame numbers")
data = np.transpose([(len(p), p.rxncoord.mean(), p.radius.min())
for p in self.H.profiles.values()])
assert_array_equal(data[0], [401, 399],
err_msg="incorrect profile lengths")
assert_array_almost_equal(data[1], [1.98767, 0.0878],
err_msg="wrong mean HOLE rxncoord")
assert_array_almost_equal(data[2], [1.19819, 1.29628],
err_msg="wrong minimum radius")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_min_radius(self):
assert_array_almost_equal(self.H.min_radius(),
np.array([[ 5. , 1.19819],
[ 6. , 1.29628]]),
err_msg="min_radius() array not correct")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
@dec.skipif(module_not_found("matplotlib"))
def test_plot(self):
import matplotlib.axes
ax = self.H.plot(label=True)
assert_(isinstance(ax, matplotlib.axes.Axes),
msg="H.plot() did not produce an Axes instance")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
@dec.skipif(module_not_found("matplotlib"))
def test_plot3D(self):
import mpl_toolkits.mplot3d
ax = self.H.plot3D()
assert_(isinstance(ax, mpl_toolkits.mplot3d.Axes3D),
msg="H.plot3D() did not produce an Axes3D instance")
@attr('slow')
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
@dec.skipif(module_not_found("matplotlib"))
def test_plot3D_rmax(self):
import mpl_toolkits.mplot3d
ax = self.H.plot3D(rmax=2.5)
assert_(isinstance(ax, mpl_toolkits.mplot3d.Axes3D),
msg="H.plot3D(rmax=float) did not produce an Axes3D instance")
class TestHoleModule(TestCase):
@dec.skipif(rlimits_missing, msg="Test skipped because platform does not allow setting rlimits")
def setUp(self):
self.universe = MDAnalysis.Universe(MULTIPDB_HOLE)
try:
# on Unix we can manipulate our limits: http://docs.python.org/2/library/resource.html
import resource
self.soft_max_open_files, self.hard_max_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
pass
@attr('slow')
@attr('issue')
@dec.skipif(rlimits_missing, msg="Test skipped because platform does not allow setting rlimits")
@dec.skipif(executable_not_found("hole"), msg="Test skipped because HOLE not found")
def test_hole_module_fd_closure(self):
"""test open file descriptors are closed (MDAnalysisTests.analysis.test_hole.TestHoleModule): Issue 129"""
# If Issue 129 isn't resolved, this function will produce an OSError on
# the system, and cause many other tests to fail as well.
#
# Successful test takes ~10 s, failure ~2 s.
# Hasten failure by setting "ulimit -n 64" (can't go too low because of open modules etc...)
import resource
# ----- temporary hack -----
# on Mac OS X (on Travis) we run out of open file descriptors
# before even starting this test (see
# https://github.com/MDAnalysis/mdanalysis/pull/901#issuecomment-231938093);
# if this issue is solved by #363 then revert the following
# hack:
#
import platform
        if platform.system() == "Darwin":
max_open_files = 512
else:
max_open_files = 64
#
# --------------------------
resource.setrlimit(resource.RLIMIT_NOFILE,
(max_open_files, self.hard_max_open_files))
with tempdir.in_tempdir():
try:
H = HOLEtraj(self.universe, cvect=[0, 1, 0], sample=20.0)
finally:
self._restore_rlimits()
# pretty unlikely that the code will get through 2 rounds if the MDA
# issue 129 isn't fixed, although this depends on the file descriptor
# open limit for the machine in question
try:
for i in range(2):
# will typically get an OSError for too many files being open after
# about 2 seconds if issue 129 isn't resolved
H.run()
except OSError as err:
if err.errno == errno.EMFILE:
raise AssertionError("HOLEtraj does not close file descriptors (Issue 129)")
raise
finally:
# make sure to restore open file limit !!
self._restore_rlimits()
def _restore_rlimits(self):
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE,
(self.soft_max_open_files, self.hard_max_open_files))
except ImportError:
pass
def tearDown(self):
self._restore_rlimits()
del self.universe
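
# Hedged sketch of the API exercised by these tests (requires the external
# `hole` executable on PATH; the PDB filename is a placeholder):
#
#     from MDAnalysis.analysis.hole import HOLE
#     H = HOLE("protein.pdb", raseed=31415)
#     H.run()
#     H.collect()
#     profile = list(H.profiles.values())[0]   # has .rxncoord and .radius arrays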
|
gpl-2.0
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/utils/tests/test_shortest_path.py
|
42
|
2894
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    # treat zero entries as "no edge": set them to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
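
# The triple loop above implements the Floyd-Warshall recurrence
#     d[i, j] = min(d[i, j], d[i, k] + d[k, j])   for every intermediate node k,
# with zero entries treated as "no edge" (set to inf) before the loops and
# mapped back to 0 afterwards.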
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
loli/sklearn-ensembletrees
|
sklearn/datasets/tests/test_20news.py
|
42
|
2416
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
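
# Hedged note: fetch_20newsgroups(download_if_missing=False) raises IOError when
# the cached archive is absent, which is what triggers the SkipTest path at the
# top of test_20news() above.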
|
bsd-3-clause
|
mitdbg/aurum-datadiscovery
|
DoD/evaluate-dod.py
|
1
|
22264
|
from knowledgerepr import fieldnetwork
from modelstore.elasticstore import StoreHandler
from DoD.dod import DoD
from DoD import data_processing_utils as dpu
from DoD import view_4c_analysis_baseline as v4c
from tqdm import tqdm
import pandas as pd
import os
import time
from collections import defaultdict
import pprint
pp = pprint.PrettyPrinter(indent=4)
def create_folder(base_folder, name):
op = base_folder + name
os.makedirs(op)
return op
def run_dod(dod, attrs, values, output_path, max_hops=2, name=None):
view_metadata_mapping = dict()
i = 0
perf_stats = dict()
st_runtime = time.time()
for mjp, attrs_project, metadata in dod.virtual_schema_iterative_search(attrs, values, perf_stats, max_hops=max_hops,
debug_enumerate_all_jps=False):
proj_view = dpu.project(mjp, attrs_project)
if output_path is not None:
view_path = output_path + "/view_" + str(i)
proj_view.to_csv(view_path, encoding='latin1', index=False) # always store this
# store metadata associated to that view
view_metadata_mapping[view_path] = metadata
i += 1
et_runtime = time.time()
perf_stats['et_runtime'] = (et_runtime - st_runtime)
print("#$# " + str(name))
print("#$# ")
print("")
pp.pprint(perf_stats)
total_join_graphs = sum(perf_stats['num_join_graphs_per_candidate_group'])
total_materializable_join_graphs = sum(perf_stats['materializable_join_graphs'])
print("Total join graphs: " + str(total_join_graphs))
print("Total materializable join graphs: " + str(total_materializable_join_graphs))
print("")
print("Total views: " + str(i))
print("#$# ")
def assemble_views():
# have a way of generating the views for each query-view in a different folder
# for qv_name, qv_attr, qv_values in tqdm(query_view_definitions_many):
# print("Running query: " + str(qv_name))
# # Create a folder for each query-view
# output_path = create_folder(eval_folder, "many/" + qv_name)
# print("Out path: " + str(output_path))
# run_dod(dod, qv_attr, qv_values, output_path=output_path)
#
# for qv_name, qv_attr, qv_values in tqdm(query_view_definitions_few):
# print("Running query: " + str(qv_name))
# # Create a folder for each query-view
# output_path = create_folder(eval_folder, "few/" + qv_name)
# print("Out path: " + str(output_path))
# run_dod(dod, qv_attr, qv_values, output_path=output_path)
for qv_name, qv_attr, qv_values in tqdm(query_view_definitions_chembl):
print("Running query: " + str(qv_name))
# Create a folder for each query-view
output_path = create_folder(eval_folder, "chembl/" + qv_name)
print("Out path: " + str(output_path))
run_dod(dod, qv_attr, qv_values, output_path=output_path)
def measure_dod_performance(qv_name, qv_attr, qv_values):
# for qv_name, qv_attr, qv_values in tqdm(query_view_definitions_many):
print("Running query: " + str(qv_name))
# Create a folder for each query-view
# output_path = create_folder(eval_folder, "many/" + qv_name)
output_path = None
print("Out path: " + str(output_path))
run_dod(dod, qv_attr, qv_values, output_path=output_path, name=qv_name)
def run_4c(path):
groups_per_column_cardinality = v4c.main(path)
return groups_per_column_cardinality
def run_4c_nochasing(path):
groups_per_column_cardinality = v4c.nochasing_main(path)
return groups_per_column_cardinality
def run_4c_valuewise_main(path):
groups_per_column_cardinality = v4c.valuewise_main(path)
return groups_per_column_cardinality
def brancher(groups_per_column_cardinality):
"""
Given the 4C output, determine how many interactions this demands
:param groups_per_column_cardinality:
:return:
"""
# interactions_per_group_optimistic = []
pruned_groups_per_column_cardinality = defaultdict(dict)
human_selection = 0
for k, v in groups_per_column_cardinality.items():
compatible_groups = v['compatible']
contained_groups = v['contained']
complementary_group = v['complementary']
complementary_group = [(a, b, "", "") for a, b, _, _ in complementary_group]
contradictory_group = v['contradictory']
# Optimistic path
contradictions = defaultdict(list)
if len(contradictory_group) > 0:
for path1, _, _, path2 in contradictory_group:
if path1 not in contradictions:
contradictions[path1].append(path2)
if path2 not in contradictions:
contradictions[path2].append(path1)
if path1 not in contradictions[path2]:
contradictions[path2].append(path1)
if path2 not in contradictions[path1]:
contradictions[path1].append(path2)
# Now we sort contradictions by value length. Second sort key for determinism
contradictions = sorted(contradictions.items(), key=lambda x: (len(x[1]), x[0]), reverse=True)
if len(contradictions) > 0:
# Now we loop per each contradiction, after making a decision we prune space of views
while len(contradictions) > 0:
human_selection += 1
pruned_compatible_groups = []
pruned_contained_groups = []
pruned_complementary_groups = []
path1, path2 = contradictions.pop()
# We assume path1 is good. Therefore, path2 tables are bad. Prune away all path2
for cg in compatible_groups:
valid = True
for p2 in path2:
if p2 in set(cg):
# remove this compatible group
valid = False
break # cg is not valid
if valid:
pruned_compatible_groups.append(cg)
for contg in tqdm(contained_groups):
valid = True
for p2 in path2:
if p2 in set(contg):
valid = False
break
if valid:
pruned_contained_groups.append(contg)
invalid_paths = set(path2) # assist lookup for next two blocks
for compg in complementary_group:
compp1, compp2, _, _ = compg
if compp1 not in invalid_paths and compp2 not in invalid_paths:
pruned_complementary_groups.append((compp1, compp2, "", ""))
pruned_contradiction_group = [] # remove those with keys in invalid group
for other_path1, other_path2 in contradictions:
if other_path1 not in invalid_paths: # only check the key
pruned_contradiction_group.append((other_path1, other_path2))
# update all groups with the pruned versions
contradictions = [el for el in pruned_contradiction_group]
compatible_groups = [el for el in pruned_compatible_groups]
contained_groups = [el for el in pruned_contained_groups]
complementary_group = [(a, b, "", "") for a, b, _, _ in pruned_complementary_groups]
# Now removed contained views
# 1- from complementary groups
contained_views = set() # all contained views across contained groups
for contained_group in contained_groups:
if len(contained_group) >= 2:
contained_views.update(set(contained_group[1:]))
pruned_complementary_groups = []
for compp1, compp2, _, _ in complementary_group:
if compp1 not in contained_views and compp2 not in contained_views:
pruned_complementary_groups.append((compp1, compp2, "", ""))
complementary_group = [(a, b, "", "") for a, b, _, _ in pruned_complementary_groups]
        # 2- from contained groups
pruned_compatible_groups = []
for cg in compatible_groups:
valid = True
for el in cg:
if el in contained_views:
# remove this compatible group
valid = False
break # cg is not valid
if valid:
pruned_compatible_groups.append(cg)
compatible_groups = [el for el in pruned_compatible_groups]
# Now union complementary with compatible and coalesce contained with compatible
compatible_views = set()
pruned_complementary_groups = []
for cg in compatible_groups:
compatible_views.update(cg)
for compp1, compp2, _, _ in complementary_group:
if compp1 not in compatible_views and compp2 not in compatible_views:
pruned_complementary_groups.append((compp1, compp2, "", ""))
complementary_group = [(a, b, "", "") for a, b, _, _ in pruned_complementary_groups]
pruned_contained_groups = []
for contained_group in contained_groups:
if contained_group[0] not in compatible_views:
pruned_contained_groups.append(contained_group)
contained_groups = [el for el in pruned_contained_groups]
pruned_groups_per_column_cardinality[k]['compatible'] = compatible_groups
pruned_groups_per_column_cardinality[k]['contained'] = contained_groups
pruned_groups_per_column_cardinality[k]['complementary'] = complementary_group
pruned_groups_per_column_cardinality[k]['contradictory'] = {p1: p2 for p1, p2 in contradictions}
return pruned_groups_per_column_cardinality, human_selection
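
# Hedged sketch of the structure brancher() consumes (field names are inferred
# from the accesses above; the view and column names are placeholders):
#
#     groups_per_column_cardinality = {
#         ('col_a', 'col_b'): {
#             'compatible':    [['view_0', 'view_3'], ...],
#             'contained':     [['view_1', 'view_2'], ...],   # head contains the rest
#             'complementary': [('view_4', 'view_5', '', ''), ...],
#             'contradictory': [('view_6', attr_key, value_key, 'view_7'), ...],
#         },
#     }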
def summarize_4c_output(groups_per_column_cardinality, schema_id_info):
interactions_per_group = []
for k, v in groups_per_column_cardinality.items():
print("")
print("Analyzing group with columns = " + str(k))
print("")
compatible_groups = v['compatible']
contained_groups = v['contained']
complementary_group = v['complementary']
contradictory_group = v['contradictory']
# summary complements:
complementary_summary = defaultdict(set)
for compg in complementary_group:
compp1, compp2, _, _ = compg
if compp1 in complementary_summary:
complementary_summary[compp1].add(compp2)
            elif compp2 in complementary_summary:
complementary_summary[compp2].add(compp1)
else:
complementary_summary[compp1].add(compp2)
total_interactions = len(compatible_groups) + len(contained_groups) \
+ len(complementary_summary.keys()) + len(contradictory_group)
interactions_per_group.append((schema_id_info[k], total_interactions))
return interactions_per_group
def output_4c_results(groups_per_column_cardinality):
print("RESULTS: ")
for k, v in groups_per_column_cardinality.items():
print("")
print("Analyzing group with columns = " + str(k))
print("")
compatible_groups = v['compatible']
contained_groups = v['contained']
complementary_group = v['complementary']
contradictory_group = v['contradictory']
print("Compatible views: " + str(len(compatible_groups)))
print("Contained views: " + str(len(contained_groups)))
s_containments = dict()
if len(contained_groups) > 0:
containments = defaultdict(set)
for contg in contained_groups:
contains, contained = contg[0], contg[1:]
containments[contains].update(contained)
# now summarize dict
to_summarize = set()
for k, v in containments.items():
for k2, v2 in containments.items():
if k == k2:
continue
if k in v2:
to_summarize.add(k)
containments[k2].update(v) # add containments of k to k2
for k, v in containments.items():
if k not in to_summarize:
s_containments[k] = v
for k, v in s_containments.items():
print(str(k) + " contains: " + str(v))
print("Complementary views: " + str(len(complementary_group)))
if len(complementary_group) > 0:
for p1, p2, _, _ in complementary_group:
print(str(p1) + " is complementary with: " + str(p2))
print("Contradictory views: " + str(len(contradictory_group)))
if len(contradictory_group) > 0:
contradictions = defaultdict(lambda: defaultdict(list))
for path1, k, contradictory_key1, path2 in contradictory_group:
if path1 not in contradictions and path2 not in contradictions:
contradictions[path1][(k, contradictory_key1)].append(path2)
elif path1 in contradictions:
if path2 not in contradictions[path1][(k, contradictory_key1)]:
contradictions[path1][(k, contradictory_key1)].append(path2)
elif path2 in contradictions:
if path1 not in contradictions[path2][(k, contradictory_key1)]:
contradictions[path2][(k, contradictory_key1)].append(path1)
# print(path1 + " contradicts: " + path2 + " when " + str(k) + " = " + str(contradictory_key1))
# contradictions_ordered = sorted(contradictions.items(), key=lambda x: len(x[0][x[1]]), reverse=True)
for k, v in contradictions.items():
for contradiction_value, tables in v.items():
attr_k, value_k = contradiction_value
print(k + " contradicts: " + str(len(tables)) + " tables when " +
str(attr_k) + " = " + str(value_k))
print("Summarized contradictions: " + str(len(set(contradictions.keys()))))
# print("Relevant contradictions: " + str(len([k for k, _ in contradictions.items() if k not in s_containments])))
for k, v in contradictions.items():
if k not in s_containments:
for contradiction_value, tables in v.items():
attr_k, value_k = contradiction_value
print(k + " contradicts: " + str(len(tables)) + " tables when " +
str(attr_k) + " = " + str(value_k))
print("Relevant contradictions: " + str(
len(set([k for k, _ in contradictions.items() if k not in s_containments]))))
def compare_4c_baselines(many_views, few_views):
for num_views, path in tqdm(many_views):
print("#$# " + str(path))
s = time.time()
run_4c(path)
e = time.time()
print("#$# Chasing")
print("#$# " + str(num_views) + " " + str((e-s)))
s = time.time()
run_4c_nochasing(path)
e = time.time()
print("#$# No Chasing")
print("#$# " + str(num_views) + " " + str((e - s)))
for num_views, path in tqdm(few_views):
print("#$# " + str(path))
s = time.time()
run_4c(path)
e = time.time()
print("#$# Chasing")
print("#$# " + str(num_views) + " " + str((e - s)))
s = time.time()
run_4c_nochasing(path)
e = time.time()
print("#$# No Chasing")
print("#$# " + str(num_views) + " " + str((e - s)))
# s = time.time()
# run_4c_valuewise_main(path)
# e = time.time()
# print("Value Wise: " + str((e - s)))
def eval_sampling_join():
sep = ';'
base = "/Users/ra-mit/data/chembl_21/chembl/"
r1 = 'public.assays.csv'
r2 = 'public.activities.csv'
a1 = 'assay_id'
a2 = 'assay_id'
# have pairs of tables to join as input -- large tables, which is when this makes sense
# read tables in memory - dataframes
df1 = pd.read_csv(base + r1, encoding='latin1', sep=sep)
df2 = pd.read_csv(base + r2, encoding='latin1', sep=sep)
s = time.time()
# perform materialize, and sampling-materialize (with a given % sample size?)
df_a = dpu.join_ab_on_key(df1, df2, a1, a2, suffix_str='_x')
e = time.time()
print("Join: " + str((e-s)))
# force gc
import gc
df_a = None
gc.collect()
time.sleep(15)
s = time.time()
# sampling
sample_size = 1000
l, r = dpu.apply_consistent_sample(df1, df2, a1, a2, sample_size)
df_b = dpu.join_ab_on_key(l, r, a1, a2, normalize=False, suffix_str='_x')
e = time.time()
print("s-Join: " + str((e - s)))
return
if __name__ == "__main__":
print("DoD evaluation")
# eval_sampling_join()
# exit()
# Eval parameters
eval_folder = "dod_evaluation/vassembly/"
query_view_definitions_many = [
("qv2", ["Building Name Long", "Ext Gross Area", "Building Room", "Room Square Footage"],
["", "", "", ""]),
("qv4", ["Email Address", "Department Full Name"],
["[email protected]", ""]),
("qv5", ["Last Name", "Building Name", "Bldg Gross Square Footage", "Department Name"],
["", "", "", ""])
]
query_view_definitions_few = [
("qv1", ["Iap Category Name", "Person Name", "Person Email"],
["Engineering", "", ""]),
("qv3", ["Last Name", "Building Name", "Bldg Gross Square Footage", "Department Name"],
["Madden", "Ray and Maria Stata Center", "", "Dept of Electrical Engineering & Computer Science"]),
]
query_view_definitions_chembl = [
("qv1", ['assay_test_type', 'assay_category', 'journal', 'year', 'volume'],
['', '', '', '', '']),
("qv2", ['accession', 'sequence', 'organism', 'start_position', 'end_position'],
['O09028', '', 'Rattus norvegicus', '', '']),
("qv3", ['ref_type', 'ref_url', 'enzyme_name', 'organism'],
['', '', '', '']),
("qv4", ['hba', 'hbd', 'parenteral', 'topical'],
['', '', '', '']),
("qv5", ['accession', 'sequence', 'organism', 'start_position', 'end_position'],
['', '', '', '', ''])
]
# Configure DoD
# path_to_serialized_model = "/Users/ra-mit/development/discovery_proto/models/mitdwh/"
path_to_serialized_model = "/Users/ra-mit/development/discovery_proto/models/chembl_and_drugcentral/"
# sep = ","
sep = ";"
store_client = StoreHandler()
network = fieldnetwork.deserialize_network(path_to_serialized_model)
dod = DoD(network=network, store_client=store_client, csv_separator=sep)
# 0- Assemble views for query views. To have raw number of views
# assemble_views()
#
# exit()
# 1- measure dod performance
# qv_name, qv_attr, qv_values = query_view_definitions_many[2]
# print(qv_name)
# print(qv_attr)
# print(qv_values)
# measure_dod_performance(qv_name, qv_attr, qv_values)
# 1.5- then have a way for calling 4c on each folder -- on all folders. To compare savings (create strategy here)
path = "dod_evaluation/vassembly/chembl/qv5/"
groups_per_column_cardinality, schema_id_info = run_4c(path)
import pickle
with open("./tmp-4c-serial", 'wb') as f:
pickle.dump(groups_per_column_cardinality, f)
pickle.dump(schema_id_info, f)
# with open("./tmp-4c-serial", 'rb') as f:
# groups_per_column_cardinality = pickle.load(f)
# schema_id_info = pickle.load(f)
# print("!!!")
# for k, v in groups_per_column_cardinality.items():
# print(k)
# compatible_groups = v['compatible']
# contained_groups = v['contained']
# complementary_group = v['complementary']
# contradictory_group = v['contradictory']
# print("Compatible: " + str(len(compatible_groups)))
# print("Contained: " + str(len(contained_groups)))
# print("Complementary: " + str(len(complementary_group)))
# print("Contradictory: " + str(len(contradictory_group)))
# print("!!!")
# output_4c_results(groups_per_column_cardinality)
# print("")
# print("")
# print("PRUNING...")
# print("")
# print("")
pruned_groups_per_column_cardinality, human_selection = brancher(groups_per_column_cardinality)
#
# print("!!!")
# for k, v in pruned_groups_per_column_cardinality.items():
# print(k)
# compatible_groups = v['compatible']
# contained_groups = v['contained']
# complementary_group = v['complementary']
# contradictory_group = v['contradictory']
# print("Compatible: " + str(len(compatible_groups)))
# print("Contained: " + str(len(contained_groups)))
# print("Complementary: " + str(len(complementary_group)))
# print("Contradictory: " + str(len(contradictory_group)))
# print("!!!")
#
i_per_group = summarize_4c_output(pruned_groups_per_column_cardinality, schema_id_info)
# #
# # print("Pruned!!!")
# # pp.pprint(pruned_groups_per_column_cardinality)
print("Total interactions: " + str(sorted(i_per_group, key=lambda x: x[0], reverse=True)))
print("+ human selections: " + str(human_selection))
exit()
# 2- 4c efficienty
# 2.1- with many views to show advantage with respect to other less sophisticated baselines
# 2.2- with few views to show that the overhead it adds is negligible
# path1 = "dod_evaluation/vassembly/many/qv4/"
# path2 = "dod_evaluation/vassembly/many/qv2-50/"
# path3 = "dod_evaluation/vassembly/many/qv5/"
# path4 = "dod_evaluation/vassembly/few/qv1/"
# path5 = "dod_evaluation/vassembly/few/qv3/"
# # compare_4c_baselines(many_views=[('9', path1), ('177', path2), ('99', path3)],
# # few_views=[('2', path4), ('2', path5)])
path = "dod_evaluation/vassembly/many/qv5/"
compare_4c_baselines(many_views=[('12', path)],
few_views=[])
# 3- Measure average time per join attempt. Add total times as well
|
mit
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/pandas/tests/io/json/test_json_table_schema.py
|
9
|
18572
|
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
    def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True)),
pd.period_range('2016', freq='A', periods=3)]
for arr in date_data:
assert as_json_table_type(arr) == 'datetime'
def test_as_json_table_type_string_data(self):
strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])]
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_data(self):
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any'
assert as_json_table_type(pd.CategoricalIndex([1])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
# ------
# dtypes
# ------
def test_as_json_table_type_int_dtypes(self):
integers = [np.int, np.int16, np.int32, np.int64]
for t in integers:
assert as_json_table_type(t) == 'integer'
def test_as_json_table_type_float_dtypes(self):
floats = [np.float, np.float16, np.float32, np.float64]
for t in floats:
assert as_json_table_type(t) == 'number'
def test_as_json_table_type_bool_dtypes(self):
bools = [bool, np.bool]
for t in bools:
assert as_json_table_type(t) == 'boolean'
def test_as_json_table_type_date_dtypes(self):
# TODO: datedate.date? datetime.time?
dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(),
DatetimeTZDtype('ns', 'US/Central')]
for t in dates:
assert as_json_table_type(t) == 'datetime'
def test_as_json_table_type_timedelta_dtypes(self):
durations = [np.timedelta64, np.dtype("<m8[ns]")]
for t in durations:
assert as_json_table_type(t) == 'duration'
def test_as_json_table_type_string_dtypes(self):
strings = [object] # TODO
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_dtypes(self):
assert as_json_table_type(pd.Categorical) == 'any'
assert as_json_table_type(CategoricalDtype()) == 'any'
class TestTableOrient(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])),
'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'],
ordered=True)),
'G': [1., 2., 3, 4.],
'H': pd.date_range('2016-01-01', freq='d', periods=4,
tz='US/Central'),
},
index=pd.Index(range(4), name='idx'))
def test_build_series(self):
s = pd.Series([1, 2], name='a')
s.index.name = 'id'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [{'name': 'id', 'type': 'integer'},
{'name': 'a', 'type': 'integer'}]
schema = {
'fields': fields,
'primaryKey': ['id'],
}
expected = OrderedDict([
('schema', schema),
('data', [OrderedDict([('id', 0), ('a', 1)]),
OrderedDict([('id', 1), ('a', 2)])])])
assert result == expected
def test_to_json(self):
df = self.df.copy()
df.index.name = 'idx'
result = df.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [
{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'E',
'ordered': False,
'type': 'any'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'F',
'ordered': True,
'type': 'any'},
{'name': 'G', 'type': 'number'},
{'name': 'H', 'type': 'datetime', 'tz': 'US/Central'}
]
schema = {
'fields': fields,
'primaryKey': ['idx'],
}
data = [
OrderedDict([('idx', 0), ('A', 1), ('B', 'a'),
('C', '2016-01-01T00:00:00.000Z'),
('D', 'P0DT1H0M0S'),
('E', 'a'), ('F', 'a'), ('G', 1.),
('H', '2016-01-01T06:00:00.000Z')
]),
OrderedDict([('idx', 1), ('A', 2), ('B', 'b'),
('C', '2016-01-02T00:00:00.000Z'),
('D', 'P0DT1H1M0S'),
('E', 'b'), ('F', 'b'), ('G', 2.),
('H', '2016-01-02T06:00:00.000Z')
]),
OrderedDict([('idx', 2), ('A', 3), ('B', 'c'),
('C', '2016-01-03T00:00:00.000Z'),
('D', 'P0DT1H2M0S'),
('E', 'c'), ('F', 'c'), ('G', 3.),
('H', '2016-01-03T06:00:00.000Z')
]),
OrderedDict([('idx', 3), ('A', 4), ('B', 'c'),
('C', '2016-01-04T00:00:00.000Z'),
('D', 'P0DT1H3M0S'),
('E', 'c'), ('F', 'c'), ('G', 4.),
('H', '2016-01-04T06:00:00.000Z')
]),
]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_float_index(self):
data = pd.Series(1, index=[1., 2.])
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema', {
'fields': [{'name': 'index', 'type': 'number'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']
}),
('data', [OrderedDict([('index', 1.0), ('values', 1)]),
OrderedDict([('index', 2.0), ('values', 1)])])])
)
assert result == expected
def test_to_json_period_index(self):
idx = pd.period_range('2016', freq='Q-JAN', periods=2)
data = pd.Series(1, idx)
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'freq': 'Q-JAN', 'name': 'index', 'type': 'datetime'},
{'name': 'values', 'type': 'integer'}]
schema = {'fields': fields, 'primaryKey': ['index']}
data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'),
('values', 1)]),
OrderedDict([('index', '2016-02-01T00:00:00.000Z'),
('values', 1)])]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(['a', 'b']))
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema',
{'fields': [{'name': 'index', 'type': 'any',
'constraints': {'enum': ['a', 'b']},
'ordered': False},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}),
('data', [
OrderedDict([('index', 'a'),
('values', 1)]),
OrderedDict([('index', 'b'), ('values', 1)])])])
)
assert result == expected
def test_date_format_raises(self):
with pytest.raises(ValueError):
self.df.to_json(orient='table', date_format='epoch')
# others work
self.df.to_json(orient='table', date_format='iso')
self.df.to_json(orient='table')
def test_make_field_int(self):
data = [1, 2, 3]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'integer'}
assert result == expected
def test_make_field_float(self):
data = [1., 2., 3.]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'number'}
assert result == expected
def test_make_field_datetime(self):
data = [1., 2., 3.]
kinds = [pd.Series(pd.to_datetime(data), name='values'),
pd.to_datetime(data)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime'}
assert result == expected
kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'),
pd.to_datetime(data, utc=True)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime', "tz": "UTC"}
assert result == expected
arr = pd.period_range('2016', freq='A-DEC', periods=4)
result = make_field(arr)
expected = {"name": "values", "type": 'datetime', "freq": "A-DEC"}
assert result == expected
def test_make_field_categorical(self):
data = ['a', 'b', 'c']
ordereds = [True, False]
for ordered in ordereds:
arr = pd.Series(pd.Categorical(data, ordered=ordered), name='cats')
result = make_field(arr)
expected = {"name": "cats", "type": "any",
"constraints": {"enum": data},
"ordered": ordered}
assert result == expected
arr = pd.CategoricalIndex(data, ordered=ordered, name='cats')
result = make_field(arr)
expected = {"name": "cats", "type": "any",
"constraints": {"enum": data},
"ordered": ordered}
assert result == expected
def test_categorical(self):
s = pd.Series(pd.Categorical(['a', 'b', 'a']))
s.index.name = 'idx'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'name': 'idx', 'type': 'integer'},
{'constraints': {'enum': ['a', 'b']},
'name': 'values',
'ordered': False,
'type': 'any'}]
expected = OrderedDict([
('schema', {'fields': fields,
'primaryKey': ['idx']}),
('data', [OrderedDict([('idx', 0), ('values', 'a')]),
OrderedDict([('idx', 1), ('values', 'b')]),
OrderedDict([('idx', 2), ('values', 'a')])])])
assert result == expected
def test_set_default_names_unset(self):
data = pd.Series(1, pd.Index([1]))
result = set_default_names(data)
assert result.index.name == 'index'
def test_set_default_names_set(self):
data = pd.Series(1, pd.Index([1], name='myname'))
result = set_default_names(data)
assert result.index.name == 'myname'
def test_set_default_names_mi_unset(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')]))
result = set_default_names(data)
assert result.index.names == ['level_0', 'level_1']
def test_set_default_names_mi_set(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')],
names=['n1', 'n2']))
result = set_default_names(data)
assert result.index.names == ['n1', 'n2']
    def test_set_default_names_mi_partition(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')],
names=['n1', None]))
result = set_default_names(data)
assert result.index.names == ['n1', 'level_1']
def test_timestamp_in_columns(self):
df = pd.DataFrame([[1, 2]], columns=[pd.Timestamp('2016'),
pd.Timedelta(10, unit='s')])
result = df.to_json(orient="table")
js = json.loads(result)
assert js['schema']['fields'][1]['name'] == 1451606400000
assert js['schema']['fields'][2]['name'] == 10000
def test_overlapping_names(self):
cases = [
pd.Series([1], index=pd.Index([1], name='a'), name='a'),
pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
pd.DataFrame({"A": [1]}, index=pd.MultiIndex.from_arrays([
['a'], [1]
], names=["A", "a"])),
]
for data in cases:
with pytest.raises(ValueError) as excinfo:
data.to_json(orient='table')
assert 'Overlapping' in str(excinfo.value)
def test_mi_falsey_name(self):
# GH 16203
df = pd.DataFrame(np.random.randn(4, 4),
index=pd.MultiIndex.from_product([('A', 'B'),
('a', 'b')]))
result = [x['name'] for x in build_table_schema(df)['fields']]
assert result == ['level_0', 'level_1', 0, 1, 2, 3]
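
# Hedged round-trip sketch of the orient='table' behaviour exercised above
# (the column/index names are illustrative only):
#
#     df = pd.DataFrame({'x': [1, 2]}, index=pd.Index([0, 1], name='idx'))
#     payload = json.loads(df.to_json(orient='table'))
#     payload['schema']['fields']   # [{'name': 'idx', ...}, {'name': 'x', ...}]
#     payload['data']               # [{'idx': 0, 'x': 1}, {'idx': 1, 'x': 2}]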
|
mit
|