repo_name | path | copies | size | content | license
---|---|---|---|---|---
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/event_handling/viewlims.py | 3 | 2924 | # Creates two identical panels. Zooming in on the right panel will show
# a rectangle in the first panel, denoting the zoomed region.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# We just subclass Rectangle so that it can be called with an Axes
# instance, causing the rectangle to update its shape to match the
# bounds of the Axes
class UpdatingRect(Rectangle):
def __call__(self, ax):
self.set_bounds(*ax.viewLim.bounds)
ax.figure.canvas.draw_idle()
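# (Axes 'xlim_changed'/'ylim_changed' callbacks are invoked with the Axes
# instance as their only argument, which is why an UpdatingRect can be
# registered directly as the handler further below.)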
# A class that will regenerate a fractal set as we zoom in, so that you
# can actually see the increasing detail. A box in the left panel will show
# the area to which we are zoomed.
class MandlebrotDisplay(object):
def __init__(self, h=500, w=500, niter=50, radius=2., power=2):
self.height = h
self.width = w
self.niter = niter
self.radius = radius
self.power = power
def __call__(self, xstart, xend, ystart, yend):
self.x = np.linspace(xstart, xend, self.width)
self.y = np.linspace(ystart, yend, self.height).reshape(-1,1)
c = self.x + 1.0j * self.y
threshold_time = np.zeros((self.height, self.width))
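        # threshold_time accumulates, per point, the number of iterations it
        # stays inside the escape radius (the classic escape-time count).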
        z = np.zeros(threshold_time.shape, dtype=complex)
        mask = np.ones(threshold_time.shape, dtype=bool)
for i in range(self.niter):
z[mask] = z[mask]**self.power + c[mask]
mask = (np.abs(z) < self.radius)
threshold_time += mask
return threshold_time
def ax_update(self, ax):
ax.set_autoscale_on(False) # Otherwise, infinite loop
        # Get the number of points from the number of pixels in the window
        dims = ax.axesPatch.get_window_extent().bounds
        self.width = int(dims[2] + 0.5)
        self.height = int(dims[3] + 0.5)
        # Get the range for the new area
        xstart, ystart, xdelta, ydelta = ax.viewLim.bounds
xend = xstart + xdelta
yend = ystart + ydelta
# Update the image object with our new data and extent
im = ax.images[-1]
im.set_data(self.__call__(xstart, xend, ystart, yend))
im.set_extent((xstart, xend, ystart, yend))
ax.figure.canvas.draw_idle()
md = MandlebrotDisplay()
Z = md(-2., 0.5, -1.25, 1.25)
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
ax2 = fig.add_subplot(1, 2, 2)
ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black')
rect.set_bounds(*ax2.viewLim.bounds)
ax1.add_patch(rect)
# Connect for changing the view limits
ax2.callbacks.connect('xlim_changed', rect)
ax2.callbacks.connect('ylim_changed', rect)
ax2.callbacks.connect('xlim_changed', md.ax_update)
ax2.callbacks.connect('ylim_changed', md.ax_update)
plt.show()
| mit |
sinhrks/scikit-learn | sklearn/linear_model/tests/test_base.py | 19 | 12955 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data
from sklearn.linear_model.base import sparse_center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
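# A minimal illustrative sketch, not one of the original tests: the closed
# form above, theta = (X^T W X)^(-1) X^T W y, is equivalent to ordinary least
# squares on sqrt(weight)-rescaled data, which is the identity that
# _rescale_data (exercised at the bottom of this file) relies on. The helper
# name below is purely illustrative and assumes only the numpy import above.
def _weighted_least_squares_closed_form_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 5)
    y = rng.randn(6)
    w = 1.0 + rng.rand(6)
    W = np.diag(w)
    # Closed form: theta = (X^T W X)^(-1) X^T W y
    theta_closed = np.linalg.solve(X.T.dot(W).dot(X), X.T.dot(W).dot(y))
    # Ordinary least squares on sqrt(w)-rescaled data gives the same solution.
    sw = np.sqrt(w)
    theta_rescaled = np.linalg.lstsq(X * sw[:, np.newaxis], y * sw,
                                     rcond=None)[0]
    np.testing.assert_allclose(theta_closed, theta_rescaled)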
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
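    # np.std(X, axis=0) * sqrt(n_samples) equals the L2 norm of each centered
    # column, so the normalize=True transform below yields unit-norm columns.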
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
JingJunYin/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 7 | 266607 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
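  """Enqueue `values` into `queue` as a constant of the queue's dtype (default shape (1, len(values)))."""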
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
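# Example (illustrative): for labels [[0, 1, 0], [1, 0, 1]] the function above
# returns indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2] (the column ids of
# the 1-entries, packed left-to-right within each row) and dense shape [2, 3].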
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
  test_case.assertEqual(
      set(expected), set(v.name for v in variables.local_variables()))
  test_case.assertEqual(
      set(expected),
      set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
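      # Each update returns the cumulative mean over all values seen so far:
      # 1/2 = 0.5, 5.9/4 = 1.475, 12.4/6, and finally 13.2/8 = 1.65.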
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
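      # (With flattening, only the first element matches, so the weighted
      # accuracy is 100 / (100 + 1 + 1) ~= 0.98.)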
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
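          # With this fixture the confusion counts are TP=1, FN=2, FP=4,
          # TN=5; the same predictions/labels are reused by the false
          # negative/positive and true negative tests below.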
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(
0.15,
0.5,
0.85,))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
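    # Each row of `expected_points` is one (x, y) point per threshold:
    # (fpr, tpr) for the ROC curve and (recall, precision) for the PR curve.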
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
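  # For each negative example, tp holds the weighted fraction of positives
  # ranked above it; averaging that fraction over the (weighted) negatives
  # gives the probability that a random positive outranks a random negative,
  # which is the ROC AUC.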
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
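    # streaming_auc requires predictions in [0, 1]; out-of-range values make
    # the update op fail with an InvalidArgumentError.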
with self.test_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
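    # Helper that splits `x` into batches, enqueues them, and returns the
    # dequeue op so the metric sees one batch per update.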
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.test_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match,
        # although with a higher number of samples/thresholds the accuracy
        # should improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, ['dynamic_auc/concat_labels/array:0',
'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0',
'dynamic_auc/concat_preds/size:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
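    # streaming_dynamic_auc only depends on how the predictions rank the
    # examples, so scores outside [0, 1] still yield AUC 1.0 when every
    # positive outranks every negative.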
with self.test_session() as sess:
predictions = constant_op.constant([2.5, -2.5, 2.5, -2.5],
dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.test_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.Variable(array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.Variable(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
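    # The local variables are re-assigned with fresh data before each call to
    # update_op, so a single graph accumulates predictions and labels across
    # all batches.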
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.test_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {k: value.eval().tolist() for k, value in
result._asdict().items()}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def testAllTruePositives(self):
self._testCase([[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase([[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase([[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase([[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
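    # num_thresholds=3 yields the thresholds [0, 0.5, 1]; each entry below is
    # the confusion-matrix count (or the derived precision/recall) at one of
    # those cut-offs.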
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]],
{
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValuesWithWeights(self):
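    # With per-example weights the counts become weighted sums: at threshold
    # 0 every example is predicted positive, so tp = 0 + 0 + 0.5 + 1 = 1.5
    # (the weights of the true labels) and fp = 0.5 + 2 = 2.5.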
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]],
{
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
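    # Only two of the five positives score above roughly 0.25, so a
    # sensitivity of 0.4 is met with a threshold near 0.26; three of the five
    # negatives (0.0, 0.1, 0.2) fall below it, giving specificity 3/5 = 0.6.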
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, sensitivity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.8)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, sensitivity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
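      # At threshold -1 every example is predicted positive (precision 3/4,
      # recall 1); at threshold 2 nothing is predicted positive, so both
      # metrics fall back to 0.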
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
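    # Guard the expected ratios against division by zero with a small epsilon.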
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(tf_predictions,
tf_labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(tf_predictions,
tf_labels,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
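    # FPR = FP / (FP + TN); epsilon guards against division by zero when
    # there are no negative examples.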
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match.
      # However, with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
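    # Of the 5 positive labels, at most 4 can be recovered while keeping
    # precision >= 0.8, so the expected recall is 4 / 5 = 0.8.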
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
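    # Of the 3 positive labels, at most 2 can be recovered while keeping
    # precision >= 0.4, so the expected recall is 2 / 3.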
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
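    # With these weights, at most a positive weight of 2 (out of a total
    # positive weight of 3) can be recovered while keeping precision >= 0.4,
    # so the expected recall is 2 / 3.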
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
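      # At threshold 0.5, index 1 (prediction 0, label 1) is a false negative
      # and index 2 (prediction 1, label 1) is a true positive, so
      # FNR = FN / (FN + TP) = 1 / 2.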
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
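      # Only the second row carries weight; its single positive label is
      # caught at threshold 0.5 (FNR 0.0) but missed at threshold 1.1
      # (FNR 1.0).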
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
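      # At threshold -1.0 every example is predicted positive, so none of the
      # 3 positive labels is missed and FNR = 0.0. At threshold 2.0 nothing
      # is predicted positive, so all 3 positives are missed and FNR = 1.0.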
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
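    # FNR = FN / (FN + TP); epsilon guards against division by zero when
    # there are no positive examples.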
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match.
      # However, with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
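    # Every example is labeled class 0. Class 0 is the top-1 prediction only
    # for the second row, in the top 2 for the second and fourth rows, and in
    # the top 3 for all rows, giving recalls of 0.25, 0.5 and 1.0 for k=1,2,3.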
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
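    # The weights keep only the second and fourth rows, and class 0 is in the
    # top 2 for both of them, so the expected recall is 1.0.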
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0,], [1,], [2,]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10,], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
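    # avg_precision_ex1[i] sums precision@j over the relevant positions
    # j <= k and divides by k (where k = i + 1).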
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # For both examples, we expect both precision and average precision to be
    # the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # For weighted examples, we expect streaming average precision to be the
    # weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(shape=(2, None),
dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = (2, 1)
    # Fails since rank of predictions_idx is less than two.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
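    # The top-1 prediction is class 3 for both rows, but only the first row
    # has label 3, so the expected precision is 1 / 2.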
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=2.0 / 2,
class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=1.0 / 1,
class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=0.0 / 1,
class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4],
[0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0],
[0, 0, 0, 1]]
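    # At k=1 the top prediction is class 3 for both rows, which hits the label
    # in the second row only, so recall is 1 / 2.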
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels, weights)
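    # Expected value: only the weight-1 elements contribute, so the weighted
    # MAE is (|4 - 3| + |8 - 3|) / 2 = 3, which the assertions below check.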
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(predictions,
labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
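    # With an all-zero normalizer the op is expected to report 0 rather than an
    # infinite or NaN relative error, which is what the assertions below check.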
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
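    # Expected MSE: ((2 - 1)^2 + (4 - 3)^2 + (6 - 2)^2) / 3 = (1 + 1 + 16) / 3 = 6.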
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
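    # Only the weight-1 elements count: ((4 - 3)^2 + (8 - 3)^2) / 2 = 26 / 2 = 13.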
error, update_op = metrics.streaming_mean_squared_error(predictions, labels,
weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
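      # Two batches of squared errors: (81 + 25 + 16) + (36 + 1 + 49) = 208 over
      # 6 elements, so both batches together should yield an MSE of 208 / 6.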
error, update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
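      # The two metrics accumulate independently: stream 0 reproduces the
      # 208 / 6 case above, while stream 1 sums (25 + 16 + 4) + (16 + 9 + 9) = 79
      # squared errors over 6 elements.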
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
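      # Both metrics see the same 6 elements: absolute errors sum to
      # 9 + 5 + 4 + 6 + 1 + 7 = 32 and squared errors to 208, giving 32 / 6 and
      # 208 / 6 respectively.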
mae, ma_update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
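# As the variable names checked in testVars below suggest, streaming_covariance
# appears to keep a running count, running means of predictions and labels, and
# a co-moment accumulator; the tests compare its output against np.cov.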
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
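      # Reference value from np.cov with fweights, which interprets the weights
      # as per-sample frequency counts; the streaming op should match it.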
expected_cov = np.cov([2, 4, 6, 8],
[1, 3, 2, 7],
fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
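      # Reference value: Pearson r is the weighted covariance normalized by the
      # product of the weighted standard deviations, read off the np.cov matrix
      # computed below.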
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
      weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(
expected_r, actual_r, 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
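    # Row-wise cosine distances: identical vectors -> 0, opposite vectors -> 2,
    # orthogonal vectors -> 1, so the mean distance over the three rows is 1.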
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
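    # With the first row masked out, the remaining distances are 2 (opposite
    # vectors) and 1 (orthogonal vectors), so the weighted mean is 1.5.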
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
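      # Per-class IOU = TP / (TP + FP + FN): class 0 -> 1/2, class 1 -> 1/4 and
      # class 2 -> 0 (it is predicted once but never correct), averaged below.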
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
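      # Only the weight-1 samples count, leaving (label, prediction) pairs
      # (0, 0), (1, 1), (1, 0), (0, 0): class 0 IOU = 2/3 and class 1 IOU = 1/2.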
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
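    # The update op returns the accumulated confusion matrix (rows = labels,
    # columns = predictions); the IOUs 3/5 and 5/7 are read directly off it.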
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 2, 1, 1, 0],
[0, 1, 2, 2, 0, 1]],
[[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
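    # Class 2 never occurs in the labels but is predicted, so its IOU is 0 and
    # it still contributes a term to the mean (hence the division by 3 below).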
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 / (0 + 5 + 0)),
miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1]],
[[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)), miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
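    # _next_array_size rounds the requested size up to the smallest power of
    # growth_factor that is at least that size, presumably so streaming_concat
    # can grow its backing array geometrically; the expected values below follow
    # that doubling pattern.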
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
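    # Expands a confusion matrix into parallel (label, prediction) sample
    # arrays: entry [i, j] contributes that many samples with label i and
    # prediction j.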
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([
[9, 3, 1],
[4, 8, 2],
[2, 1, 6]])
# overall total = 36
# po = [9, 8, 6], sum(po) = 23
# pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
# finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
# = (23 - 12.34) / (36 - 12.34)
# = 0.45
# see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
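    # Equivalently, in proportions: kappa = (p_o - p_e) / (1 - p_e) with
    # p_o = 23/36 ~= 0.639 and p_e = 12.34/36 ~= 0.343, giving ~0.45.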
expect = 0.45
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
shapes = [(len(labels,)), # 1-dim
(len(labels), 1)] # 2-dim
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
    # confusion matrix (4 classes, 25 samples each on the diagonal)
    # [[25, 0, 0, 0],
    #  [0, 25, 0, 0],
    #  [0, 0, 25, 0],
    #  [0, 0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
    # confusion matrix (4 classes, every prediction shifted by one class)
    # [[0, 25, 0, 0],
    #  [0, 0, 25, 0],
    #  [0, 0, 0, 25],
    #  [25, 0, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.test_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([
[9, 3, 1],
[4, 8, 2],
[2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.test_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4,
weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([
[90, 30, 10, 20],
[40, 80, 20, 30],
[20, 10, 60, 35],
[15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
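    # Stream the 540 samples in 10 batches of 54 to exercise the accumulating
    # update op; the final kappa must match the single-shot sklearn value below.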
predictions_t = array_ops.placeholder(dtypes_lib.float32,
shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32,
shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32,
shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(update_op,
feed_dict={labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
if __name__ == '__main__':
test.main()
| apache-2.0 |
xiaoxq/apollo | modules/tools/control_info/control_info.py | 2 | 15239 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Control Planning Analyzer
"""
import argparse
import math
import sys
import threading
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy
import tkinter.filedialog
from matplotlib import patches
from matplotlib import lines
from cyber.python.cyber_py3 import cyber
from modules.localization.proto import localization_pb2
from modules.canbus.proto import chassis_pb2
from modules.planning.proto import planning_pb2
from modules.control.proto import control_cmd_pb2
class ControlInfo(object):
"""
ControlInfo Class
"""
def __init__(self, axarr):
self.throttlecmd = []
self.throttlefbk = []
self.brakecmd = []
self.brakefbk = []
self.steercmd = []
self.steerfbk = []
self.speed = []
self.curvature = []
self.imuright = []
self.imuforward = []
self.imuup = []
self.controltime = []
self.planningtime = []
self.localizationtime = []
self.canbustime = []
self.acceleration_lookup = []
self.speed_lookup = []
self.acc_open = []
self.acc_close = []
self.station_error = []
self.speed_error = []
self.heading_error = []
self.lateral_error = []
self.heading_error_rate = []
self.lateral_error_rate = []
self.target_speed = []
self.target_curvature = []
self.target_acceleration = []
self.target_heading = []
self.target_time = []
self.driving_mode = 0
self.mode_time = []
self.ax = axarr
self.planningavailable = False
self.lock = threading.Lock()
def callback_planning(self, entity):
"""
New Planning Trajectory
"""
basetime = entity.header.timestamp_sec
numpoints = len(entity.trajectory_point)
with self.lock:
self.pointx = numpy.zeros(numpoints)
self.pointy = numpy.zeros(numpoints)
self.pointspeed = numpy.zeros(numpoints)
self.pointtime = numpy.zeros(numpoints)
self.pointtheta = numpy.zeros(numpoints)
self.pointcurvature = numpy.zeros(numpoints)
self.pointacceleration = numpy.zeros(numpoints)
for idx in range(numpoints):
self.pointx[idx] = entity.trajectory_point[idx].path_point.x
self.pointy[idx] = entity.trajectory_point[idx].path_point.y
self.pointspeed[idx] = entity.trajectory_point[idx].v
self.pointtheta[idx] = entity.trajectory_point[
idx].path_point.theta
self.pointcurvature[idx] = entity.trajectory_point[
idx].path_point.kappa
self.pointacceleration[idx] = entity.trajectory_point[
idx].a
self.pointtime[
idx] = entity.trajectory_point[idx].relative_time + basetime
if numpoints == 0:
self.planningavailable = False
else:
self.planningavailable = True
def callback_canbus(self, entity):
"""
New Canbus
"""
self.throttlefbk.append(entity.throttle_percentage)
self.brakefbk.append(entity.brake_percentage)
self.steerfbk.append(entity.steering_percentage)
self.speed.append(entity.speed_mps)
self.canbustime.append(entity.header.timestamp_sec)
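        # Record the timestamps at which the vehicle enters or leaves
        # COMPLETE_AUTO_DRIVE; consecutive pairs of these times are later used
        # to shade the auto-driving intervals in the plots.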
if entity.driving_mode == chassis_pb2.Chassis.COMPLETE_AUTO_DRIVE:
if self.driving_mode == 0:
self.mode_time.append(entity.header.timestamp_sec)
self.driving_mode = 1
elif self.driving_mode == 1:
self.mode_time.append(entity.header.timestamp_sec)
self.driving_mode = 0
def callback_localization(self, entity):
"""
New Localization
"""
self.imuright.append(entity.pose.linear_acceleration_vrf.x)
self.imuforward.append(entity.pose.linear_acceleration_vrf.y)
self.imuup.append(entity.pose.linear_acceleration_vrf.z)
self.localizationtime.append(entity.header.timestamp_sec)
def callback_control(self, entity):
"""
New Control Command
"""
self.throttlecmd.append(entity.throttle)
self.brakecmd.append(entity.brake)
self.steercmd.append(entity.steering_target)
self.controltime.append(entity.header.timestamp_sec)
self.acceleration_lookup.append(
entity.debug.simple_lon_debug.acceleration_lookup)
self.speed_lookup.append(entity.debug.simple_lon_debug.speed_lookup)
self.acc_open.append(
entity.debug.simple_lon_debug.preview_acceleration_reference)
self.acc_close.append(
entity.debug.simple_lon_debug.acceleration_cmd_closeloop)
self.station_error.append(entity.debug.simple_lon_debug.station_error)
self.speed_error.append(entity.debug.simple_lon_debug.speed_error)
self.curvature.append(entity.debug.simple_lat_debug.curvature)
self.heading_error.append(entity.debug.simple_lat_debug.heading_error)
self.lateral_error.append(entity.debug.simple_lat_debug.lateral_error)
self.heading_error_rate.append(
entity.debug.simple_lat_debug.heading_error_rate)
self.lateral_error_rate.append(
entity.debug.simple_lat_debug.lateral_error_rate)
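        # Interpolate the most recent planning trajectory at the control
        # command's timestamp so planned and measured quantities share a
        # common time base for plotting.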
with self.lock:
if self.planningavailable:
self.target_speed.append(
numpy.interp(entity.header.timestamp_sec, self.pointtime,
self.pointspeed))
self.target_curvature.append(
numpy.interp(entity.header.timestamp_sec, self.pointtime,
self.pointcurvature))
self.target_acceleration.append(
numpy.interp(entity.header.timestamp_sec, self.pointtime,
self.pointacceleration))
self.target_heading.append(
numpy.interp(entity.header.timestamp_sec, self.pointtime,
self.pointtheta))
self.target_time.append(entity.header.timestamp_sec)
def longitudinal(self):
"""
        Plot longitudinal control information (throttle, brake, speed, acceleration)
"""
for loc, ax in numpy.ndenumerate(self.ax):
ax.clear()
self.ax[0, 0].plot(
self.canbustime, self.throttlefbk, label='Throttle Feedback')
self.ax[0, 0].plot(
self.controltime, self.throttlecmd, label='Throttle Command')
self.ax[0, 0].plot(
self.canbustime, self.brakefbk, label='Brake Feedback')
self.ax[0, 0].plot(
self.controltime, self.brakecmd, label='Brake Command')
self.ax[0, 0].legend(fontsize='medium')
self.ax[0, 0].grid(True)
self.ax[0, 0].set_title('Throttle Brake Info')
self.ax[0, 0].set_xlabel('Time')
self.ax[0, 1].plot(
self.speed_lookup, self.acceleration_lookup, label='Table Lookup')
self.ax[0, 1].plot(
self.target_speed, self.target_acceleration, label='Target')
self.ax[0, 1].legend(fontsize='medium')
self.ax[0, 1].grid(True)
self.ax[0, 1].set_title('Calibration Lookup')
self.ax[0, 1].set_xlabel('Speed')
self.ax[0, 1].set_ylabel('Acceleration')
self.ax[1, 0].plot(self.canbustime, self.speed, label='Vehicle Speed')
self.ax[1, 0].plot(
self.target_time, self.target_speed, label='Target Speed')
self.ax[1, 0].plot(
self.target_time, self.target_acceleration, label='Target Acc')
self.ax[1, 0].plot(
self.localizationtime, self.imuforward, label='IMU Forward')
self.ax[1, 0].legend(fontsize='medium')
self.ax[1, 0].grid(True)
self.ax[1, 0].set_title('Speed Info')
self.ax[1, 0].set_xlabel('Time')
self.ax[1, 1].plot(
self.controltime, self.acceleration_lookup, label='Lookup Acc')
self.ax[1, 1].plot(self.controltime, self.acc_open, label='Acc Open')
self.ax[1, 1].plot(self.controltime, self.acc_close, label='Acc Close')
self.ax[1, 1].plot(
self.controltime, self.station_error, label='station_error')
self.ax[1, 1].plot(
self.controltime, self.speed_error, label='speed_error')
self.ax[1, 1].legend(fontsize='medium')
self.ax[1, 1].grid(True)
self.ax[1, 1].set_title('IMU Info')
self.ax[1, 1].set_xlabel('Time')
if len(self.mode_time) % 2 == 1:
self.mode_time.append(self.controltime[-1])
for i in range(0, len(self.mode_time), 2):
self.ax[0, 0].axvspan(
self.mode_time[i], self.mode_time[i + 1], fc='0.1', alpha=0.1)
self.ax[1, 0].axvspan(
self.mode_time[i], self.mode_time[i + 1], fc='0.1', alpha=0.1)
self.ax[1, 1].axvspan(
self.mode_time[i], self.mode_time[i + 1], fc='0.1', alpha=0.1)
plt.draw()
def lateral(self):
"""
        Plot lateral control information (steering, curvature, tracking errors)
"""
print("Showing Lateral")
for loc, ax in numpy.ndenumerate(self.ax):
ax.clear()
self.ax[0, 0].plot(
self.canbustime, self.steerfbk, label='Steering Feedback')
self.ax[0, 0].plot(
self.controltime, self.steercmd, label='Steering Command')
self.ax[0, 0].plot(self.controltime, self.curvature, label='Curvature')
self.ax[0, 0].legend(fontsize='medium')
self.ax[0, 0].grid(True)
self.ax[0, 0].set_title('Steering Info')
self.ax[0, 0].set_xlabel('Time')
"""
self.ax[0, 1].legend(fontsize = 'medium')
self.ax[0, 1].grid(True)
self.ax[0, 1].set_title('Calibration Lookup')
self.ax[0, 1].set_xlabel('Speed')
self.ax[0, 1].set_ylabel('Acceleration')
"""
self.ax[1, 0].plot(
self.controltime, self.heading_error, label='heading_error')
self.ax[1, 0].plot(
self.controltime, self.lateral_error, label='lateral_error')
self.ax[1, 0].legend(fontsize='medium')
self.ax[1, 0].grid(True)
self.ax[1, 0].set_title('Error Info')
self.ax[1, 0].set_xlabel('Time')
self.ax[1, 1].plot(
self.controltime,
self.heading_error_rate,
label='heading_error_rate')
self.ax[1, 1].plot(
self.controltime,
self.lateral_error_rate,
label='lateral_error_rate')
self.ax[1, 1].legend(fontsize='medium')
self.ax[1, 1].grid(True)
self.ax[1, 1].set_title('IMU Info')
self.ax[1, 1].set_xlabel('Time')
if len(self.mode_time) % 2 == 1:
self.mode_time.append(self.controltime[-1])
for i in range(0, len(self.mode_time), 2):
self.ax[0, 0].axvspan(
self.mode_time[i], self.mode_time[i + 1], fc='0.1', alpha=0.1)
self.ax[1, 0].axvspan(
self.mode_time[i], self.mode_time[i + 1], fc='0.1', alpha=0.1)
self.ax[1, 1].axvspan(
self.mode_time[i], self.mode_time[i + 1], fc='0.1', alpha=0.1)
plt.draw()
def press(self, event):
"""
Keyboard events during plotting
"""
if event.key == 'q' or event.key == 'Q':
plt.close('all')
if event.key == 'a' or event.key == 'A':
            self.longitudinal()
if event.key == 'z' or event.key == 'Z':
self.lateral()
if __name__ == "__main__":
from cyber.python.cyber_py3.record import RecordReader
parser = argparse.ArgumentParser(
description='Process and analyze control and planning data')
parser.add_argument('--bag', type=str, help='use Rosbag')
args = parser.parse_args()
fig, axarr = plt.subplots(2, 2)
plt.tight_layout()
axarr[0, 0].get_shared_x_axes().join(axarr[0, 0], axarr[1, 0])
axarr[1, 1].get_shared_x_axes().join(axarr[0, 0], axarr[1, 1])
controlinfo = ControlInfo(axarr)
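    # Two modes of operation: replay a cyber record file when --bag is given,
    # otherwise subscribe to the live topics until Enter is pressed.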
if args.bag:
file_path = args.bag
# bag = rosbag.Bag(file_path)
reader = RecordReader(file_path)
for msg in reader.read_messages():
print(msg.timestamp, msg.topic)
if msg.topic == "/apollo/localization/pose":
localization = localization_pb2.LocalizationEstimate()
localization.ParseFromString(msg.message)
controlinfo.callback_localization(localization)
elif msg.topic == "/apollo/planning":
adc_trajectory = planning_pb2.ADCTrajectory()
adc_trajectory.ParseFromString(msg.message)
controlinfo.callback_planning(adc_trajectory)
elif msg.topic == "/apollo/control":
control_cmd = control_cmd_pb2.ControlCommand()
control_cmd.ParseFromString(msg.message)
controlinfo.callback_control(control_cmd)
elif msg.topic == "/apollo/canbus/chassis":
chassis = chassis_pb2.Chassis()
chassis.ParseFromString(msg.message)
controlinfo.callback_canbus(chassis)
print("Done reading the file")
else:
cyber.init()
# rospy.init_node('control_info', anonymous=True)
node = cyber.Node("rtk_recorder")
planningsub = node.create_reader('/apollo/planning',
planning_pb2.ADCTrajectory,
controlinfo.callback_planning)
localizationsub = node.create_reader(
'/apollo/localization/pose', localization_pb2.LocalizationEstimate,
controlinfo.callback_localization)
controlsub = node.create_reader('/apollo/control',
control_cmd_pb2.ControlCommand,
controlinfo.callback_control)
canbussub = node.create_reader('/apollo/canbus/chassis',
chassis_pb2.Chassis,
controlinfo.callback_canbus)
input("Press Enter To Stop")
mng = plt.get_current_fig_manager()
controlinfo.longitudinal()
fig.canvas.mpl_connect('key_press_event', controlinfo.press)
plt.show()
| apache-2.0 |
e-mission/e-mission-server | emission/tests/analysisTests/intakeTests/TestFilterAccuracy.py | 1 | 8477 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import pymongo
import json
import bson.json_util as bju
import pandas as pd
from uuid import UUID
import os
# Our imports
import emission.core.get_database as edb
import emission.core.wrapper.pipelinestate as ecwp
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
import emission.tests.common as etc
class TestFilterAccuracy(unittest.TestCase):
def setUp(self):
# We need to access the database directly sometimes in order to
# forcibly insert entries for the tests to pass. But we put the import
# in here to reduce the temptation to use the database directly elsewhere.
import emission.core.get_database as edb
import uuid
self.analysis_conf_path = \
etc.set_analysis_config("intake.cleaning.filter_accuracy.enable", True)
self.testUUID = None
def tearDown(self):
import emission.core.get_database as edb
edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID})
os.remove(self.analysis_conf_path)
def checkSuccessfulRun(self):
pipelineState = edb.get_pipeline_state_db().find_one({"user_id": self.testUUID,
"pipeline_stage": ecwp.PipelineStages.ACCURACY_FILTERING.value})
self.assertIsNotNone(pipelineState["last_ts_run"])
def testEmptyCallToPriorDuplicate(self):
dataFile = "emission/tests/data/smoothing_data/tablet_2015-11-03"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
# Check call to check duplicate with a zero length dataframe
entry = unfiltered_points_df.iloc[5]
self.assertEqual(eaicf.check_prior_duplicate(pd.DataFrame(), 0, entry), False)
def testEmptyCall(self):
dataFile = "emission/tests/data/smoothing_data/tablet_2015-11-03"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
# Check call to the entire filter accuracy with a zero length timeseries
import emission.core.get_database as edb
edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
# We expect that this should not throw
eaicf.filter_accuracy(self.testUUID)
self.assertEqual(len(self.ts.get_data_df("background/location")), 0)
self.checkSuccessfulRun()
def testCheckPriorDuplicate(self):
dataFile = "emission/tests/data/smoothing_data/tablet_2015-11-03"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry = unfiltered_points_df.iloc[5]
unfiltered_appended_df = pd.DataFrame([entry] * 5).append(unfiltered_points_df).reset_index()
logging.debug("unfiltered_appended_df = %s" % unfiltered_appended_df[["fmt_time"]].head())
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 0, entry), False)
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 5, entry), True)
self.assertEqual(eaicf.check_prior_duplicate(unfiltered_points_df, 5, entry), False)
def testConvertToFiltered(self):
dataFile = "emission/tests/data/smoothing_data/tablet_2015-11-03"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry_from_df = unfiltered_points_df.iloc[5]
entry_copy = eaicf.convert_to_filtered(self.ts.get_entry_at_ts("background/location",
"metadata.write_ts",
entry_from_df.metadata_write_ts))
self.assertNotIn("_id", entry_copy)
self.assertEqual(entry_copy["metadata"]["key"], "background/filtered_location")
def testExistingFilteredLocation(self):
dataFile = "emission/tests/data/smoothing_data/tablet_2015-11-03"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
self.assertEqual(len(unfiltered_points_df), 205)
entry_from_df = unfiltered_points_df.iloc[5]
logging.debug("entry_from_df: data.ts = %s, metadata.ts = %s" %
(entry_from_df.ts, entry_from_df.metadata_write_ts))
self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), False)
entry_copy = self.ts.get_entry_at_ts("background/location", "metadata.write_ts",
entry_from_df.metadata_write_ts)
self.ts.insert(eaicf.convert_to_filtered(entry_copy))
self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), True)
def testFilterAccuracy(self):
dataFile = "emission/tests/data/smoothing_data/tablet_2015-11-03"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", None)
self.assertEqual(len(unfiltered_points_df), 205)
pre_filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(pre_filtered_points_df), 0)
eaicf.filter_accuracy(self.testUUID)
filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(filtered_points_df), 124)
self.checkSuccessfulRun()
def testFilterAccuracyWithPartialFiltered(self):
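        # Simulate a partially processed timeseries: delete the filtered points
        # after a cutoff timestamp, then verify that re-running the pipeline
        # regenerates exactly the missing entries.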
dataFile = "emission/tests/data/real_examples/shankari_2016-independence_day"
etc.setupRealExample(self, dataFile)
self.ts = esta.TimeSeries.get_time_series(self.testUUID)
unfiltered_points_df = self.ts.get_data_df("background/location", None)
self.assertEqual(len(unfiltered_points_df), 801)
pre_filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(pre_filtered_points_df), 703)
cutoff_ts = pre_filtered_points_df.iloc[200].ts
del_result = edb.get_timeseries_db().delete_many({
"user_id": self.testUUID,
"metadata.key": "background/filtered_location",
"data.ts": {"$gte": cutoff_ts}
})
self.assertEqual(del_result.raw_result["n"], 503)
post_cutoff_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(post_cutoff_points_df), 200)
eaicf.filter_accuracy(self.testUUID)
filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
self.assertEqual(len(filtered_points_df), 703)
self.checkSuccessfulRun()
def testPandasMergeBehavior(self):
import pandas as pd
df_a = pd.DataFrame({"ts": [1,2,3,4]})
df_b = pd.DataFrame({"ts": [1,3]})
merged_left_idx = df_a.merge(df_b, on="ts", how="inner", left_index=True)
merged_right_idx = df_a.merge(df_b, on="ts", how="inner", right_index=True)
self.assertEqual(merged_left_idx.index.to_list(), [0,1])
self.assertEqual(merged_right_idx.index.to_list(), [0,2])
if __name__ == '__main__':
etc.configLogging()
unittest.main()
| bsd-3-clause |
kiliakis/BLonD | __TEST_CASES/main_files/TC10_Fixed_frequency.py | 1 | 3581 | import numpy as np
from input_parameters.preprocess import *
from input_parameters.general_parameters import *
import sys
from decimal import Decimal
import matplotlib.pyplot as plt
from beams.beams import *
from input_parameters.rf_parameters import *
from plots.plot_beams import *
from plots.plot_impedance import *
from plots.plot_slices import *
from plots.plot import *
from plots.plot_parameters import *
from beams.slices import *
from monitors.monitors import *
from trackers.tracker import *
import time
from matplotlib.animation import ArtistAnimation
from beams.distributions import *
from llrf.phase_loop import *
# Beam parameters
particle_type = 'proton'
n_macroparticles = 100000
n_particles = 0
# Machine and RF parameters
radius = 25 # [m]
gamma_transition = 4.076750841 # [1]
alpha = 1 / gamma_transition**2 # [1]
C = 2*np.pi*radius # [m]
n_turns = 10000
general_params = GeneralParameters(n_turns, C, alpha, 310891054.809,
particle_type)
# Cavities parameters
n_rf_systems = 1
harmonic_numbers_1 = 1 # [1]
voltage_1 = 8000 # [V]
phi_offset_1 = 0 # [rad]
rf_params = RFSectionParameters(general_params, n_rf_systems, harmonic_numbers_1, voltage_1, phi_offset_1, omega_rf = 1.00001*2.*np.pi/general_params.t_rev[0])
my_beam = Beam(general_params, n_macroparticles, n_particles)
slices_ring = Slices(rf_params, my_beam, 200, cut_left = -0.9e-6, cut_right = 0.9e-6)
#Phase loop
configuration = {'machine': 'PSB', 'PL_gain': 0., 'RL_gain': [0.,0.], 'PL_period': 10.e-6, 'RL_period': 7}
phase_loop = PhaseLoop(general_params, rf_params, slices_ring, configuration)
#Long tracker
long_tracker = RingAndRFSection(rf_params, my_beam, periodicity = 'Off', PhaseLoop = phase_loop)
full_ring = FullRingAndRF([long_tracker])
distribution_options = {'type': 'gaussian', 'bunch_length': 200.e-9, 'density_variable': 'density_from_J'}
matched_from_distribution_density(my_beam, full_ring, distribution_options)
slices_ring.track()
long_tracker = RingAndRFSection(rf_params, my_beam, periodicity = 'Off', PhaseLoop = phase_loop)
#Monitor
bunch_monitor = BunchMonitor(general_params, rf_params, my_beam, '../output_files/TC10_output_data',
Slices = slices_ring, PhaseLoop = phase_loop)
#Plots
format_options = {'dirname': '../output_files/TC10_fig'}
plots = Plot(general_params, rf_params, my_beam, 1000, 10000, -0.9e-6, 0.9e-6, -1.e6, 1.e6,
separatrix_plot= True, Slices = slices_ring, format_options = format_options, h5file = '../output_files/TC10_output_data', PhaseLoop = phase_loop)
# Accelerator map
map_ = [long_tracker] + [slices_ring] + [bunch_monitor] + [plots]
#phase_loop.reference += 0.00001
for i in range(1, n_turns+1):
t0 = time.clock()
for m in map_:
m.track()
slices_ring.track_cuts()
#print time.clock()-t0
if (i % 100 == 0):
print "Time step %d" %i
print " Radial error %.4e" %(phase_loop.drho)
print " Radial error, accum %.4e" %(phase_loop.drho_int)
print " Radial loop frequency correction %.4e 1/s" %(phase_loop.domega_RF)
print " RF phase %.4f rad" %(rf_params.phi_RF[0,i])
print " RF frequency %.6e 1/s" %(rf_params.omega_RF[0,i])
print " Tracker phase %.4f rad" %(long_tracker.phi_RF[0,i])
print " Tracker frequency %.6e 1/s" %(long_tracker.omega_RF[0,i])
print 'DONE'
| gpl-3.0 |
FRED-2/Fred2-Apps | src/epitopeselection.py | 2 | 10401 | #!/usr/bin/env python
"""
Command line tool for epitope selection
usage: epitopeselection.py [-h] -i INPUT -a ALLELES [-k K] [-t THRESHOLD] -o
OUTPUT [-s SOLVER] [-c_al CONS_ALLELE]
[-c_a CONS_ANTIGEN] [-c_c CONS_CONSERVATION]
[-c CONSERVATION]
Epitope Selection for vaccine design.
optional arguments:
-h, --help show this help message and exit
-i INPUT, --input INPUT
Peptide with immunogenicity file (from
epitopeprediction)
-a ALLELES, --alleles ALLELES
Allele file with frequencies (one allele and frequency
per line)
-k K, --k K Specifies the number of epitopes to select
-t THRESHOLD, --threshold THRESHOLD
Specifies the binding threshold for all alleles
-o OUTPUT, --output OUTPUT
Specifies the output path. Results will be written to
CSV
-s SOLVER, --solver SOLVER
Specifies the ILP solver
-c_al CONS_ALLELE, --cons_allele CONS_ALLELE
Activates allele coverage constraint with specified
threshold
-c_a CONS_ANTIGEN, --cons_antigen CONS_ANTIGEN
Activates antigen coverage constraint with specified
threshold
-c_c CONS_CONSERVATION, --cons_conservation CONS_CONSERVATION
Activates conservation constraint with specified
threshold
-c CONSERVATION, --conservation CONSERVATION
Specifies a Conservation file. First column is the
peptide seq second column the conservation.
"""
import sys
import pandas
import collections
import argparse
from Fred2.EpitopeSelection.OptiTope import OptiTope
from Fred2.Core import Allele, Peptide, Protein, EpitopePredictionResult
def generate_epitope_result(input, allele_file):
"""
generates EpitopePredictionResult from output of epitopeprediction and neoepitopeprediction
"""
#first generate alleles in allele file
alleles = {}
with open(allele_file, "r") as af:
for l in af:
allele, freq = l.split("\t")
alleles[allele] = Allele(allele, prob=float(freq))
r_raw = pandas.read_csv(input, sep="\t")
res_dic = {}
method = r_raw.loc[0, "Method"]
columns = set(["Sequence", "Method", "Antigen ID", "Variant"])
alleles_raw = [c for c in r_raw.columns if c not in columns]
for k, row in r_raw.iterrows():
seq = row["Sequence"]
protPos = collections.defaultdict(list)
try:
protPos = {Protein(p, gene_id=p, transcript_id=p): [0] for p in str(row["Antigen ID"]).split(",")}
except KeyError:
pass
pep = Peptide(seq, protein_pos=protPos)
for a in alleles_raw:
if a in alleles:
if alleles[a] not in res_dic:
res_dic[alleles[a]] = {}
res_dic[alleles[a]][pep] = float(row[a])
if not res_dic:
sys.stderr.write("HLA alleles of population and HLA used for prediction did not overlap.")
sys.exit(-1)
df_result = EpitopePredictionResult.from_dict(res_dic)
df_result.index = pandas.MultiIndex.from_tuples([tuple((i, method)) for i in df_result.index],
names=['Seq', 'Method'])
return df_result, method
def to_csv(out_file, result, instance, pred_method):
"""
Writes model to CSV
"""
with open(out_file, "w") as f:
f.write("#Prediction method: " + pred_method + "\n#\n")
cons = ["#Maximum number of epitopes to select = " + str(int(instance.k.value)) + "\n"]
if float(instance.t_c.value) > 0:
cons.append("#Epitope conservation >= " + str(float(instance.t_c.value) * 100) + "%\n")
if float(instance.t_allele.value) > 0:
cons.append("#Covered alleles >= " + str(int(instance.t_allele.value)) + "\n")
if float(instance.t_var.value) > 0:
cons.append("#Covered antigens >= " + str(int(instance.t_var.value)) + "\n")
f.write("#CONSTRAINTS\n" + "".join(cons) + "#\n")
res = ["#Selected epitopes\t" + str(len(result)) + ""]
if int(instance.t_var.value) > 0:
cov_anti = []
for an in instance.Q:
for e in result:
if e in instance.E_var[an].value:
cov_anti.append(an)
cov_anti = set(cov_anti)
res.append("#Covered antigens\t" + str(len(cov_anti)) + " of " + str(len(instance.Q)) + "")
cov_als = []
res_set = set(result)
locus = {}
for a in instance.A:
eps_of_all_i = list(instance.A_I[a])
if res_set.intersection(set(eps_of_all_i)):
cov_als.append(a)
locus.setdefault(str(a).split("*")[0], set()).add(a)
cov_als = set(cov_als)
res.append("#Covered alleles\t" + str(len(cov_als)) + " of " + str(len(instance.A)) + "")
res.append("#Locus coverage:")
pop_cov = 1
for k, g in locus.iteritems():
locus = list(g)
pop_cov *= (1.0 - sum(float(instance.p[a]) for a in locus)) ** 2
covered = len(locus) / float(sum(1 for a in instance.A if a.split("*")[0] == k))
res.append("#\t%s\t%.2f" % (k, covered * 100))
res.append("#Population coverage:\t\t%.2f" % ((1.0 - pop_cov) * 100))
f.write("#RESULTS\n" + "\n".join(res) + "\n")
is_antigen_cons = int(instance.t_var.value) > 0
header = "Epitope\tConservation\tFraction of overall immunogenicity\tCovered alleles%s\n" % (
"\tCovered antigens" if is_antigen_cons else "")
rows = []
overall_imm = sum(float(instance.i[e, a]) * float(instance.p[a]) for e in result for a in instance.A)
for e in result:
row = str(e) + "\t"
if float(instance.t_c.value) > 0:
row += str(float(instance.c[e].value) * 100) + "\t"
else:
row += "100%\t"
row += "%0.2f\t" % (sum(float(instance.i[e, a]) * float(instance.p[a]) for a in instance.A) / overall_imm)
row += "%s" % " ".join(str(a) for a in instance.A if e in instance.A_I[a])
if is_antigen_cons:
row += "\t%s" % " ".join(str(q) for q in instance.Q if e in instance.E_var[q])
rows.append(row)
f.write(header + "\n".join(rows) + "\n\n")
def main():
'''
some input stuff
'''
parser = argparse.ArgumentParser(
description="Epitope Selection for vaccine design.",
)
parser.add_argument("-i","--input",
required=True,
type=str,
help="Peptide with immunogenicity file (from epitopeprediction)",
)
parser.add_argument("-a","--alleles",
required=True,
type=str,
help="Allele file with frequencies (one allele and frequency per line)",
)
parser.add_argument("-k","--k",
required=False,
type=int,
default=10,
help="Specifies the number of epitopes to select",
)
parser.add_argument("-t", "--threshold",
type=float,
default=0.,
help="Specifies the binding threshold for all alleles",
)
parser.add_argument("-o", "--output",
required=True,
type=str,
help="Specifies the output path. Results will be written to CSV",
)
parser.add_argument("-s","--solver",
type=str,
default="cbc",
help="Specifies the ILP solver")
parser.add_argument("-c_al", "--cons_allele",
required=False,
type=float,
default=0.0,
help="Activates allele coverage constraint with specified threshold",
)
parser.add_argument("-c_a", "--cons_antigen",
required=False,
type=float,
default=0.0,
help="Activates antigen coverage constraint with specified threshold",
)
c_c = parser.add_argument("-c_c", "--cons_conservation",
required=False,
type=float,
help="Activates conservation constraint with specified threshold",
)
parser.add_argument("-c", "--conservation",
required=False,
type=str,
help="Specifies a Conservation file. First column is the peptide seq second column the conservation.",
)
args = parser.parse_args()
    epitopePrediction, method = generate_epitope_result(args.input, args.alleles)
    thresh = {a.name: float(args.threshold) for a in epitopePrediction.columns}
    opti = OptiTope(epitopePrediction, threshold=thresh, k=int(args.k), solver=args.solver, verbosity=0)
# set constraints
if args.cons_allele > 0:
#print "allele constraint enforced"
opti.activate_allele_coverage_const(float(args.cons_allele) / 100.0)
if args.cons_antigen > 0:
opti.activate_antigen_coverage_const(float(args.cons_antigen) / 100.0)
if args.cons_conservation > 0:
if args.conservation:
conservation = {}
with open(args.conservation, "r") as f:
for l in f:
if l != "":
seq, cons = l.replace(",", " ").replace(";", " ").split()
conservation[seq.strip().upper()] = float(cons.strip())
opti.activate_epitope_conservation_const(float(args.cons_conservation)/100.0, conservation=conservation)
else:
opti.activate_epitope_conservation_const(float(args.cons_conservation)/100.0)
try:
result = opti.solve(options={"threads": 1})
to_csv(args.output, result, opti.instance, method)
return 0
except ValueError as e:
sys.stderr.write("Could not optimally solve the problem. Please modify your constraints.\n"+str(e))
return -1
except Exception as e:
sys.stderr.write(str(e))
return -1
if __name__ == "__main__":
sys.exit(main()) | bsd-3-clause |
matmodlab/matmodlab2 | tests/test_materials_elastic.py | 1 | 13297 | import os
import glob
import pytest
import random
import numpy as np
from matmodlab2 import *
from testing_utils import *
this_d = os.path.dirname(os.path.realpath(__file__))
K = 9.980040E+09
G = 3.750938E+09
E = 9. * K * G / (3. * K + G)
Nu = (3.0 * K - 2.0 * G) / (2.0 * (3.0 * K + G))
parameters = {'K': K, 'G': G, 'E': E, 'Nu': Nu}
@pytest.mark.pandas
@pytest.mark.elastic
@pytest.mark.material
def test_elastic_consistency():
"""Test the elastic and plastic materials for equivalence"""
environ.SQA = True
E = 10.
Nu = .1
G = E / 2. / (1. + Nu)
K = E / 3. / (1. - 2. * Nu)
jobid = 'Job-El'
mps_el = MaterialPointSimulator(jobid)
material = ElasticMaterial(E=E, Nu=Nu)
mps_el.assign_material(material)
mps_el.run_step('E'*6, [1,0,0,0,0,0], scale=.1, frames=1)
mps_el.run_step('S'*6, [0,0,0,0,0,0], frames=5)
df_el = mps_el.df
jobid = 'Job-Pl'
mps_pl = MaterialPointSimulator(jobid)
material = PlasticMaterial(K=K, G=G)
mps_pl.assign_material(material)
mps_pl.run_step('E'*6, [1,0,0,0,0,0], scale=.1, frames=1)
mps_pl.run_step('S'*6, [0,0,0,0,0,0], frames=5)
df_pl = mps_pl.df
for key in ('S.XX', 'S.YY', 'S.ZZ', 'E.XX', 'E.YY', 'E.ZZ'):
assert np.allclose(df_el[key], df_pl[key])
@pytest.mark.elastic
@pytest.mark.material
def test_uniaxial_strain():
pathtable = [[1.0, 0.0, 0.0],
[2.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
mps = MaterialPointSimulator('elastic_unistrain')
material = ElasticMaterial(**parameters)
mps.assign_material(material)
for c in pathtable:
mps.run_step('E', c, scale=-0.5)
H = K + 4. / 3. * G
Q = K - 2. / 3. * G
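    # For uniaxial strain (only E.XX nonzero) linear elasticity gives
    # S.XX = (K + 4G/3) * E.XX and S.YY = S.ZZ = (K - 2G/3) * E.XX,
    # which is what the assertions below check.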
a = mps.get2('E.XX', 'S.XX', 'S.YY', 'S.ZZ')
eps_xx = mps.data[:,4]
assert np.allclose(a[:,2], a[:,3])
assert np.allclose(a[:,1], H * a[:,0])
assert np.allclose(a[:,2], Q * a[:,0])
assert np.allclose(eps_xx, a[:,0])
@pytest.mark.elastic
@pytest.mark.material
def test_uniaxial_stress():
pathtable = [[1.0, 0.0, 0.0],
[2.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
mps = MaterialPointSimulator('elastic_unistress')
material = ElasticMaterial(**parameters)
mps.assign_material(material)
for c in pathtable:
mps.run_step('SSS', c, frames=50, scale=-1.e6)
a = mps.get2('E.XX', 'S.XX', 'S.YY', 'S.ZZ')
assert np.allclose(a[:,2], 0)
assert np.allclose(a[:,3], 0)
diff = (a[:,1] - parameters['E'] * a[:,0]) / parameters['E']
assert max(abs(diff)) < 1e-10
@pytest.mark.elastic
@pytest.mark.material
def test_uniaxial_strain_with_stress_control():
pathtable = [[ -7490645504., -3739707392., -3739707392.],
[-14981291008., -7479414784., -7479414784.],
[ -7490645504., -3739707392., -3739707392.],
[ 0., 0., 0.]]
mps = MaterialPointSimulator('elastic_unistrain_stressc')
material = ElasticMaterial(**parameters)
mps.assign_material(material)
for c in pathtable:
mps.run_step('SSS', c, frames=250)
a = mps.get2('E.XX', 'E.YY', 'E.ZZ', 'S.XX')
assert np.allclose(a[:,1], 0)
assert np.allclose(a[:,2], 0)
H = K + 4. / 3. * G
diff = (a[:,3] - H * a[:,0]) / H
assert max(abs(diff)) < 1e-7
@pytest.mark.elastic
@pytest.mark.material
@pytest.mark.parametrize('realization', range(1,4))
def test_random_linear_elastic(realization):
difftol = 5.e-08
failtol = 1.e-07
myvars = ('Time',
'E.XX', 'E.YY', 'E.ZZ', 'E.XY', 'E.YZ', 'E.XZ',
'S.XX', 'S.YY', 'S.ZZ', 'S.XY', 'S.YZ', 'S.XZ')
jobid = 'rand_linear_elastic_{0}'.format(realization)
mps = MaterialPointSimulator(jobid)
NU, E, K, G, LAM = gen_rand_elast_params()
material = ElasticMaterial(E=E, Nu=NU)
mps.assign_material(material)
analytic = gen_analytical_response(LAM, G)
for (i, row) in enumerate(analytic[1:], start=1):
incr = analytic[i, 0] - analytic[i-1, 0]
mps.run_step('E', row[1:7], increment=incr, frames=10)
simulation = mps.get2(*myvars)
assert responses_are_same(jobid, analytic, simulation, myvars)
@pytest.mark.pandas
@pytest.mark.elastic
@pytest.mark.material
@pytest.mark.analytic
def test_supreme():
''' This test is 'supreme' because it compares the following values
against the analytical solution:
* Stress
* Strain
* Deformation gradient
* Symmetric part of the velocity gradient
    This is meant to be a static test for linear elasticity. Its primary
purpose is to be THE benchmark for linear elasticity as it checks each
component of stress/strain as well as exercises key parts of the
driver (like how it computes inputs).
For uniaxial strain:
| a 0 0 | | exp(a) 0 0 |
e = | 0 0 0 | U = | 0 1 0 |
| 0 0 0 | | 0 0 1 |
-1 | 1/exp(a) 0 0 | dU da | exp(a) 0 0 |
U = | 0 1 0 | -- = -- | 0 0 0 |
| 0 0 1 | dt dt | 0 0 0 |
da | 1 0 0 |
D = L = -- | 0 0 0 |
dt | 0 0 0 |
For pure shear
| 0 a 0 | 1 | exp(2a)+1 exp(2a)-1 0 | | 0 0 0 |
e = | a 0 0 | U = - exp(-a) | exp(2a)-1 exp(2a)+1 0 | + | 0 0 0 |
| 0 0 0 | 2 | 0 0 0 | | 0 0 1 |
-1 1 | exp(-a) + exp(a) exp(-a) - exp(a) 0 |
U = - | exp(-a) - exp(a) exp(-a) + exp(a) 0 |
2 | 0 0 2 |
dU da / | exp(a) exp(a) 0 | \
-- = -- | | exp(a) exp(a) 0 | - U |
dt dt \ | 0 0 1 | /
da | 0 1 0 |
D = L = -- | 1 0 0 |
dt | 0 0 0 |
'''
difftol = 5.e-08
failtol = 1.e-07
jobid = 'supreme_linear_elastic'
mps = MaterialPointSimulator(jobid)
N = 25
solfile = os.path.join(this_d, 'data', mps.jobid + '.base_dat')
path, LAM, G, tablepath = generate_solution(solfile, N)
# set up the material
K = LAM + 2.0 * G / 3.0
E = 9. * K * G / (3. * K + G)
Nu = (3.0 * K - 2.0 * G) / (2.0 * (3.0 * K + G))
params = {'E': E, 'Nu': Nu}
material = ElasticMaterial(**params)
mps.assign_material(material)
for row in tablepath:
mps.run_step('E', row, increment=1.0, frames=N)
# check output with analytic (all shared variables)
assert same_as_baseline(mps.jobid, mps.df)
def get_D_E_F_SIG(dadt, a, LAM, G, loc):
# This is just an implementation of the above derivations.
#
# 'dadt' is the current time derivative of the strain
# 'a' is the strain at the end of the step
    # 'LAM' and 'G' are the Lame and shear moduli
# 'loc' is the index for what's wanted (0,1) for xy
if loc[0] == loc[1]:
# axial
E = np.zeros((3,3))
E[loc] = a
F = np.eye(3)
F[loc] = np.exp(a)
D = np.zeros((3,3))
D[loc] = dadt
SIG = LAM * a * np.eye(3)
SIG[loc] = (LAM + 2.0 * G) * a
else:
# shear
l0, l1 = loc
E = np.zeros((3,3))
E[l0, l1] = a
E[l1, l0] = a
fac = np.exp(-a) / 2.0
F = np.eye(3)
F[l0,l0] = fac * (np.exp(2.0 * a) + 1.0)
F[l1,l1] = fac * (np.exp(2.0 * a) + 1.0)
F[l0,l1] = fac * (np.exp(2.0 * a) - 1.0)
F[l1,l0] = fac * (np.exp(2.0 * a) - 1.0)
D = np.zeros((3,3))
D[l0,l1] = dadt
D[l1,l0] = dadt
SIG = np.zeros((3,3))
SIG[l0,l1] = 2.0 * G * a
SIG[l1,l0] = 2.0 * G * a
return D, E, F, SIG
def generate_solution(solfile, N):
# solfile = filename to write analytical solution to
# N = number of steps per leg
a = 0.1 # total strain increment for each leg
LAM = 1.0e9 # Lame modulus
G = 1.0e9 # Shear modulus
T = [0.0] # time
E = [np.zeros((3,3))] # strain
SIG = [np.zeros((3,3))] # stress
F = [np.eye(3)] # deformation gradient
D = [np.zeros((3,3))] # symmetric part of velocity gradient
#
# Generate the analytical solution
#
# strains: xx yy zz xy xz yz
for loc in [(0,0), (1,1), (2,2), (0,1), (0,2), (1,2)]:
t0 = T[-1]
tf = t0 + 1.0
for idx in range(1, N+1):
fac = float(idx) / float(N)
ret = get_D_E_F_SIG(a, fac * a, LAM, G, loc)
T.append(t0 + fac)
D.append(ret[0])
E.append(ret[1])
F.append(ret[2])
SIG.append(ret[3])
for idx in range(1, N+1):
fac = float(idx) / float(N)
ret = get_D_E_F_SIG(-a, (1.0 - fac) * a, LAM, G, loc)
T.append(t0 + 1.0 + fac)
D.append(ret[0])
E.append(ret[1])
F.append(ret[2])
SIG.append(ret[3])
#
# Write the output
#
headers = ['Time',
'E.XX', 'E.YY', 'E.ZZ', 'E.XY', 'E.YZ', 'E.XZ',
'S.XX', 'S.YY', 'S.ZZ', 'S.XY', 'S.YZ', 'S.XZ',
'F.XX', 'F.XY', 'F.XZ',
'F.YX', 'F.YY', 'F.YZ',
'F.ZX', 'F.ZY', 'F.ZZ',
'D.XX', 'D.YY', 'D.ZZ', 'D.XY', 'D.YZ', 'D.XZ']
symlist = lambda x: [x[0,0], x[1,1], x[2,2], x[0,1], x[1,2], x[0,2]]
matlist = lambda x: list(np.reshape(x, 9))
fmtstr = lambda x: '{0:>25s}'.format(x)
fmtflt = lambda x: '{0:25.15e}'.format(x)
with open(solfile, 'w') as FOUT:
FOUT.write(''.join(map(fmtstr, headers)) + '\n')
for idx in range(0, len(T)):
vals = ([T[idx]] +
symlist(E[idx]) +
symlist(SIG[idx]) +
matlist(F[idx]) +
symlist(D[idx]))
FOUT.write(''.join(map(fmtflt, vals)) + '\n')
#
# Pass the relevant data so the sim can run
#
# inputs xx yy zz xy yz xz
path = '''
0 0 222222 0.0 0.0 0.0 0.0 0.0 0.0
1 1 222222 {0} 0.0 0.0 0.0 0.0 0.0
2 1 222222 0.0 0.0 0.0 0.0 0.0 0.0
3 1 222222 0.0 {0} 0.0 0.0 0.0 0.0
4 1 222222 0.0 0.0 0.0 0.0 0.0 0.0
5 1 222222 0.0 0.0 {0} 0.0 0.0 0.0
6 1 222222 0.0 0.0 0.0 0.0 0.0 0.0
7 1 222222 0.0 0.0 0.0 {0} 0.0 0.0
8 1 222222 0.0 0.0 0.0 0.0 0.0 0.0
9 1 222222 0.0 0.0 0.0 0.0 0.0 {0}
10 1 222222 0.0 0.0 0.0 0.0 0.0 0.0
11 1 222222 0.0 0.0 0.0 0.0 {0} 0.0
12 1 222222 0.0 0.0 0.0 0.0 0.0 0.0
'''.format('{0:.1f}'.format(a))
tablepath = (( a, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, a, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, a, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, a, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, a),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0, a, 0.0),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
return path, LAM, G, tablepath
def get_stress(e11, e22, e33, e12, e23, e13, LAM, G):
#standard hooke's law
sig11 = (2.0 * G + LAM) * e11 + LAM * (e22 + e33)
sig22 = (2.0 * G + LAM) * e22 + LAM * (e11 + e33)
sig33 = (2.0 * G + LAM) * e33 + LAM * (e11 + e22)
sig12 = 2.0 * G * e12
sig23 = 2.0 * G * e23
sig13 = 2.0 * G * e13
return sig11, sig22, sig33, sig12, sig23, sig13
def gen_rand_elast_params():
# poisson_ratio and young's modulus
nu = random.uniform(-1.0 + 1.0e-5, 0.5 - 1.0e-5)
E = max(1.0, 10 ** random.uniform(0.0, 12.0))
# K and G are used for parameterization
K = E / (3.0 * (1.0 - 2.0 * nu))
G = E / (2.0 * (1.0 + nu))
# LAM is used for computation
LAM = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))
return nu, E, K, G, LAM
def const_elast_params():
K = 9.980040E+09
G = 3.750938E+09
LAM = K - 2.0 / 3.0 * G
E = 9.0 * K * G / (3.0 * K + G)
NU = (3.0 * K - 2.0 * G) / (2.0 * (3.0 * K + G))
return NU, E, K, G, LAM
def gen_analytical_response(LAM, G, nlegs=4, test_type="PRINCIPAL"):
stiff = (LAM * np.outer(np.array([1,1,1,0,0,0]), np.array([1,1,1,0,0,0])) +
2.0 * G * np.identity(6))
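    # 6x6 isotropic stiffness acting on [exx, eyy, ezz, exy, eyz, exz]
    # (tensor shear strains): sigma = LAM * tr(eps) * I + 2 * G * eps.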
rnd = lambda: random.uniform(-0.01, 0.01)
table = [np.zeros(1 + 6 + 6)]
for idx in range(1, nlegs):
if test_type == "FULL":
strains = np.array([rnd(), rnd(), rnd(), rnd(), rnd(), rnd()])
elif test_type == "PRINCIPAL":
strains = np.array([rnd(), rnd(), rnd(), 0.0, 0.0, 0.0])
elif test_type == "UNIAXIAL":
strains = np.array([rnd(), 0.0, 0.0, 0.0, 0.0, 0.0])
elif test_type == "BIAXIAL":
tmp = rnd()
strains = np.array([tmp, tmp, 0.0, 0.0, 0.0, 0.0])
table.append(np.hstack(([idx], strains, np.dot(stiff, strains))))
    # returns a table with each row comprised of
# time=table[0], strains=table[1:7], stresses=table[7:]
return np.array(table)
| bsd-3-clause |
jhogsett/linkit | python/kelvin_to_rgb.py | 1 | 3202 | """
Based on: http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
Comments received: https://gist.github.com/petrklus/b1f427accdf7438606a6
Original pseudo code:
Set Temperature = Temperature \ 100
Calculate Red:
If Temperature <= 66 Then
Red = 255
Else
Red = Temperature - 60
Red = 329.698727446 * (Red ^ -0.1332047592)
If Red < 0 Then Red = 0
If Red > 255 Then Red = 255
End If
Calculate Green:
If Temperature <= 66 Then
Green = Temperature
Green = 99.4708025861 * Ln(Green) - 161.1195681661
If Green < 0 Then Green = 0
If Green > 255 Then Green = 255
Else
Green = Temperature - 60
Green = 288.1221695283 * (Green ^ -0.0755148492)
If Green < 0 Then Green = 0
If Green > 255 Then Green = 255
End If
Calculate Blue:
If Temperature >= 66 Then
Blue = 255
Else
If Temperature <= 19 Then
Blue = 0
Else
Blue = Temperature - 10
Blue = 138.5177312231 * Ln(Blue) - 305.0447927307
If Blue < 0 Then Blue = 0
If Blue > 255 Then Blue = 255
End If
End If
"""
import math
def convert_K_to_RGB(colour_temperature):
"""
Converts from K to RGB, algorithm courtesy of
http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
"""
#range check
if colour_temperature < 1000:
colour_temperature = 1000
elif colour_temperature > 40000:
colour_temperature = 40000
tmp_internal = colour_temperature / 100.0
# red
if tmp_internal <= 66:
red = 255
else:
tmp_red = 329.698727446 * math.pow(tmp_internal - 60, -0.1332047592)
if tmp_red < 0:
red = 0
elif tmp_red > 255:
red = 255
else:
red = tmp_red
# green
if tmp_internal <=66:
tmp_green = 99.4708025861 * math.log(tmp_internal) - 161.1195681661
if tmp_green < 0:
green = 0
elif tmp_green > 255:
green = 255
else:
green = tmp_green
else:
tmp_green = 288.1221695283 * math.pow(tmp_internal - 60, -0.0755148492)
if tmp_green < 0:
green = 0
elif tmp_green > 255:
green = 255
else:
green = tmp_green
# blue
if tmp_internal >=66:
blue = 255
elif tmp_internal <= 19:
blue = 0
else:
tmp_blue = 138.5177312231 * math.log(tmp_internal - 10) - 305.0447927307
if tmp_blue < 0:
blue = 0
elif tmp_blue > 255:
blue = 255
else:
blue = tmp_blue
return red, green, blue
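# A quick sanity check (the values follow directly from the formulas above):
# convert_K_to_RGB(6600) returns (255, 255, 255), i.e. ~6600 K acts as the
# white point; lower temperatures shade toward red/orange, higher toward blue.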
if __name__ == "__main__":
print("Preview requires matplotlib")
from matplotlib import pyplot as plt
step_size = 100
for i in range(0, 15000, step_size):
color = list(map(lambda div: div/255.0, convert_K_to_RGB(i))) + [1]
print(color)
plt.plot((i, i), (0, 1), linewidth=step_size/2.0, linestyle="-", color=color)
plt.show()
| mit |
wmvanvliet/mne-python | tutorials/source-modeling/plot_visualize_stc.py | 1 | 8263 | """
.. _tut-viz-stcs:
Visualize source time courses (stcs)
====================================
This tutorial focuses on visualization of
:term:`source estimates<STC>`.
Surface Source Estimates
------------------------
First, we get the paths for the evoked data and the time courses (stcs).
"""
import os
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne import read_evokeds
data_path = sample.data_path()
sample_dir = os.path.join(data_path, 'MEG', 'sample')
subjects_dir = os.path.join(data_path, 'subjects')
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')
###############################################################################
# Then, we read the stc from file
stc = mne.read_source_estimate(fname_stc, subject='sample')
###############################################################################
# This is a :class:`SourceEstimate <mne.SourceEstimate>` object
print(stc)
###############################################################################
# The SourceEstimate object is in fact a *surface* source estimate. MNE also
# supports volume-based source estimates but more on that later.
#
# We can plot the source estimate using the
# :func:`stc.plot <mne.SourceEstimate.plot>` just as in other MNE
# objects. Note that for this visualization to work, you must have ``mayavi``
# and ``pysurfer`` installed on your machine.
initial_time = 0.1
brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,
clim=dict(kind='value', lims=[3, 6, 9]))
###############################################################################
# You can also morph it to fsaverage and visualize it using a flatmap
# sphinx_gallery_thumbnail_number = 3
stc_fs = mne.compute_source_morph(stc, 'sample', 'fsaverage', subjects_dir,
smooth=5, verbose='error').apply(stc)
brain = stc_fs.plot(subjects_dir=subjects_dir, initial_time=initial_time,
clim=dict(kind='value', lims=[3, 6, 9]),
surface='flat', hemi='split', size=(1000, 500),
smoothing_steps=5, time_viewer=False,
add_data_kwargs=dict(
colorbar_kwargs=dict(label_font_size=10)))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16,
# interpolation='linear', framerate=10)
###############################################################################
# Note that here we used ``initial_time=0.1``, but we can also browse through
# time using ``time_viewer=True``.
#
# In case ``mayavi`` is not available, we also offer a ``matplotlib``
# backend. Here we use verbose='error' to ignore a warning that not all
# vertices were used in plotting.
mpl_fig = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,
backend='matplotlib', verbose='error')
###############################################################################
#
# Volume Source Estimates
# -----------------------
# We can also visualize volume source estimates (used for deep structures).
#
# Let us load the sensor-level evoked data. We select the MEG channels
# to keep things simple.
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False).crop(0.05, 0.15)
# this risks aliasing, but these data are very smooth
evoked.decimate(10, verbose='error')
###############################################################################
# Then, we can load the precomputed inverse operator from a file.
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
inv = read_inverse_operator(fname_inv)
src = inv['src']
mri_head_t = inv['mri_head_t']
###############################################################################
# The source estimate is computed using the inverse operator and the
# sensor-space data.
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stc = apply_inverse(evoked, inv, lambda2, method)
del inv
###############################################################################
# This time, we have a different container
# (:class:`VolSourceEstimate <mne.VolSourceEstimate>`) for the source time
# course.
print(stc)
###############################################################################
# This too comes with a convenient plot method.
stc.plot(src, subject='sample', subjects_dir=subjects_dir)
###############################################################################
# For this visualization, ``nilearn`` must be installed.
# This visualization is interactive. Click on any of the anatomical slices
# to explore the time series. Clicking on any time point will bring up the
# corresponding anatomical map.
#
# We could visualize the source estimate on a glass brain. Unlike the previous
# visualization, a glass brain does not show us one slice but what we would
# see if the brain was transparent like glass, and
# a :term:`maximum intensity projection` is used:
stc.plot(src, subject='sample', subjects_dir=subjects_dir, mode='glass_brain')
###############################################################################
# You can also extract label time courses using volumetric atlases. Here we'll
# use the built-in ``aparc.a2009s+aseg.mgz``:
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aparc.a2009s+aseg.mgz')
label_names = mne.get_volume_labels_from_aseg(fname_aseg)
label_tc = stc.extract_label_time_course(fname_aseg, src=src)
lidx, tidx = np.unravel_index(np.argmax(label_tc), label_tc.shape)
fig, ax = plt.subplots(1)
ax.plot(stc.times, label_tc.T, 'k', lw=1., alpha=0.5)
xy = np.array([stc.times[tidx], label_tc[lidx, tidx]])
xytext = xy + [0.01, 1]
ax.annotate(
label_names[lidx], xy, xytext, arrowprops=dict(arrowstyle='->'), color='r')
ax.set(xlim=stc.times[[0, -1]], xlabel='Time (s)', ylabel='Activation')
for key in ('right', 'top'):
ax.spines[key].set_visible(False)
fig.tight_layout()
###############################################################################
# And we can project these label time courses back to their original
# locations and see how the plot has been smoothed:
stc_back = mne.labels_to_stc(fname_aseg, label_tc, src=src)
stc_back.plot(src, subjects_dir=subjects_dir, mode='glass_brain')
###############################################################################
# Vector Source Estimates
# -----------------------
# If we choose to use ``pick_ori='vector'`` in
# :func:`apply_inverse <mne.minimum_norm.apply_inverse>`, we obtain vector
# source estimates that retain the three current components at each source
# location:
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(fname_inv)
stc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector')
brain = stc.plot(subject='sample', subjects_dir=subjects_dir,
initial_time=initial_time, brain_kwargs=dict(
silhouette=True))
###############################################################################
# Dipole fits
# -----------
# For computing a dipole fit, we need to load the noise covariance, the BEM
# solution, and the coregistration transformation files. Note that for the
# other methods, these were already used to generate the inverse operator.
fname_cov = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = os.path.join(subjects_dir, 'sample', 'bem',
'sample-5120-bem-sol.fif')
fname_trans = os.path.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
##############################################################################
# Dipoles are fit independently for each time point, so let us crop our time
# series to visualize the dipole fit for the time point of interest.
evoked.crop(0.1, 0.1)
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
##############################################################################
# Finally, we can visualize the dipole.
dip.plot_locations(fname_trans, 'sample', subjects_dir)
| bsd-3-clause |
hrashk/sympy | sympy/plotting/plot_implicit.py | 7 | 13680 | """Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval
arithmetic. It is also possible to force the use of the fall back
algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.utilities.decorator import doctest_depends_on
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, GreaterThan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
#Recursive call to refine_pixels, which subdivides the intervals that are
#neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, *args, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``(x, xmin, xmax)`` optional, 3-tuple denoting the range of symbol
``x``
- ``(y, ymin, ymax)`` optional, 3-tuple denoting the range of symbol
``y``
The following arguments can be passed as named parameters.
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
- ``title`` string. The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it defaults to
generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
interval arithmetic fails to plot with small line width.
Examples:
=========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
Recursively expands the arguments of a Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
free_symbols = set(expr.free_symbols)
range_symbols = set([t[0] for t in args])
symbols = set.union(free_symbols, range_symbols)
if len(symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
if len(args) == 2:
var_start_end_x = args[0]
var_start_end_y = args[1]
elif len(args) == 1:
if len(free_symbols) == 2:
var_start_end_x = args[0]
var_start_end_y, = (Tuple(e) + default_range
for e in (free_symbols - range_symbols))
else:
var_start_end_x, = (Tuple(e) + default_range for e in free_symbols)
#Create a random symbol
var_start_end_y = Tuple(Dummy()) + default_range
elif len(args) == 0:
if len(free_symbols) == 1:
var_start_end_x, = (Tuple(e) + default_range for e in free_symbols)
#create a random symbol
var_start_end_y = Tuple(Dummy()) + default_range
else:
var_start_end_x, var_start_end_y = (Tuple(e) + default_range
for e in free_symbols)
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
| bsd-3-clause |
Acehaidrey/incubator-airflow | airflow/providers/apache/hive/hooks/hive.py | 2 | 41178 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import socket
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any, Dict, List, Optional, Text, Union
import pandas
import unicodecsv as csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.security import utils
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var() -> Dict[Any, Any]:
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {
format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
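# Illustrative sketch (hypothetical values, not from the source): the returned
# mapping uses the 'default' names from AIRFLOW_VAR_NAME_FORMAT_MAPPING as keys,
# so it could look roughly like
# {'airflow.ctx.dag_id': 'example_dag', 'airflow.ctx.task_id': 'example_task', ...},
# with empty strings for context variables that are not set in the environment.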
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: str
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
def __init__(
self,
hive_cli_conn_id: str = "hive_cli_default",
run_as: Optional[str] = None,
mapred_queue: Optional[str] = None,
mapred_queue_priority: Optional[str] = None,
mapred_job_name: Optional[str] = None,
) -> None:
super().__init__()
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params: str = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline: bool = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
self.sub_process: Any = None
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES))
)
self.mapred_queue = mapred_queue or conf.get('hive', 'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _get_proxy_user(self) -> str:
"""This function set the proper proxy_user value in case the user overwrite the default."""
conn = self.conn
proxy_user_value: str = conn.extra_dejson.get('proxy_user', "")
if proxy_user_value == "login" and conn.login:
return f"hive.server2.proxy.user={conn.login}"
if proxy_user_value == "owner" and self.run_as:
return f"hive.server2.proxy.user={self.run_as}"
if proxy_user_value != "": # There is a custom proxy user
return f"hive.server2.proxy.user={proxy_user_value}"
return proxy_user_value # The default proxy user (undefined)
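# Illustrative sketch (hypothetical connection values): with
# extra={"proxy_user": "login"} and conn.login == "hive_user", this method
# returns "hive.server2.proxy.user=hive_user"; with no proxy_user configured it
# returns "" and no proxy setting is appended to the JDBC URL.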
def _prepare_cli_cmd(self) -> List[Any]:
"""This function creates the command list from available information"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema
)
if conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get('principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(utils.get_components(template))
proxy_user = self._get_proxy_user()
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user
)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = f'"{jdbc_url}"'
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
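# Illustrative sketch (hypothetical connection values): for a beeline
# connection with host "localhost", port 10000, schema "default", no
# kerberos, no login/password and empty hive_cli_params, the list built
# above would be roughly
# ['beeline', '-u', '"jdbc:hive2://localhost:10000/default;auth=noSasl"'].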
@staticmethod
def _prepare_hiveconf(d: Dict[Any, Any]) -> List[Any]:
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(zip(["-hiveconf"] * len(d), [f"{k}={v}" for k, v in d.items()]))
def run_cli(
self,
hql: Union[str, Text],
schema: Optional[str] = None,
verbose: bool = True,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> Any:
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = f"USE {schema};\n{hql}"
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql += '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
[
'-hiveconf',
f'mapreduce.job.queuename={self.mapred_queue}',
'-hiveconf',
f'mapred.job.queue.name={self.mapred_queue}',
'-hiveconf',
f'tez.queue.name={self.mapred_queue}',
]
)
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf', f'mapreduce.job.priority={self.mapred_queue_priority}']
)
if self.mapred_job_name:
hive_conf_params.extend(['-hiveconf', f'mapred.job.name={self.mapred_job_name}'])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sub_process: Any = subprocess.Popen(
hive_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True
)
self.sub_process = sub_process
stdout = ''
while True:
line = sub_process.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sub_process.wait()
if sub_process.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql: Union[str, Text]) -> None:
"""Test an hql statement using the hive cli and EXPLAIN"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ', 'add jar ', 'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other_ = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other_ + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df: pandas.DataFrame,
table: str,
field_dict: Optional[Dict[Any, Any]] = None,
delimiter: str = ',',
encoding: str = 'utf8',
pandas_kwargs: Any = None,
**kwargs: Any,
) -> None:
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df: pandas.DataFrame) -> Dict[Any, Any]:
dtype_kind_hive_type = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING', # void
}
order_type = OrderedDict()
for col, dtype in df.dtypes.iteritems():
order_type[col] = dtype_kind_hive_type[dtype.kind]
return order_type
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(
path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs,
)
f.flush()
return self.load_file(
filepath=f.name, table=table, delimiter=delimiter, field_dict=field_dict, **kwargs
)
def load_file(
self,
filepath: str,
table: str,
delimiter: str = ",",
field_dict: Optional[Dict[Any, Any]] = None,
create: bool = True,
overwrite: bool = True,
partition: Optional[Dict[str, Any]] = None,
recreate: bool = False,
tblproperties: Optional[Dict[str, Any]] = None,
) -> None:
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += f"DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()])
hql += f"CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join([p + " STRING" for p in partition])
hql += f"PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += f"FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join([f"'{k}'='{v}'" for k, v in tblproperties.items()])
hql += f"TBLPROPERTIES({tprops})\n"
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = f"LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += f"INTO TABLE {table} "
if partition:
pvals = ", ".join([f"{k}='{v}'" for k, v in partition.items()])
hql += f"PARTITION ({pvals})"
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
def kill(self) -> None:
"""Kill Hive cli command"""
if hasattr(self, 'sub_process') and self.sub_process is not None:
if self.sub_process.poll() is None:
print("Killing the Hive job")
self.sub_process.terminate()
time.sleep(60)
self.sub_process.kill()
class HiveMetastoreHook(BaseHook):
"""Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
def __init__(self, metastore_conn_id: str = 'metastore_default') -> None:
super().__init__()
self.conn_id = metastore_conn_id
self.metastore = self.get_metastore_client()
def __getstate__(self) -> Dict[str, Any]:
# This is for pickling to work despite the thrift hive client not
# being picklable
state = dict(self.__dict__)
del state['metastore']
return state
def __setstate__(self, d: Dict[str, Any]) -> None:
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self) -> Any:
"""Returns a Hive thrift client."""
import hmsclient
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket, TTransport
conn = self._find_valid_server()
if not conn:
raise AirflowException("Failed to locate the valid server.")
auth_mechanism = conn.extra_dejson.get('authMechanism', 'NOSASL')
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = conn.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = conn.extra_dejson.get('kerberos_service_name', 'hive')
conn_socket = TSocket.TSocket(conn.host, conn.port)
if conf.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory() -> sasl.Client:
sasl_client = sasl.Client()
sasl_client.setAttr("host", conn.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
else:
transport = TTransport.TBufferedTransport(conn_socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def _find_valid_server(self) -> Any:
conns = self.get_connections(self.conn_id)
for conn in conns:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", conn.host, conn.port)
if host_socket.connect_ex((conn.host, conn.port)) == 0:
self.log.info("Connected to %s:%s", conn.host, conn.port)
host_socket.close()
return conn
else:
self.log.error("Could not connect to %s:%s", conn.host, conn.port)
return None
def get_conn(self) -> Any:
return self.metastore
def check_for_partition(self, schema: str, table: str, partition: str) -> bool:
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(schema, table, partition, 1)
return bool(partitions)
def check_for_named_partition(self, schema: str, table: str, partition_name: str) -> Any:
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
:type table: str
:param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
:type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name: str, db: str = 'default') -> Any:
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db: str, pattern: str = '*') -> Any:
"""Get a metastore table object"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern: str = '*') -> Any:
"""Get a metastore table object"""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(
self, schema: str, table_name: str, partition_filter: Optional[str] = None
) -> List[Any]:
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For a subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if partition_filter:
parts = client.get_partitions_by_filter(
db_name=schema,
tbl_name=table_name,
filter=partition_filter,
max_parts=HiveMetastoreHook.MAX_PART_COUNT,
)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(
part_specs: List[Any], partition_key: Optional[str], filter_map: Optional[Dict[str, Any]]
) -> Any:
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
:rtype: basestring
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException(f"Provided partition_key {partition_key} is not in part_specs.")
is_subset = None
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException(
"Keys in provided filter_map {} "
"are not subset of part_spec keys: {}".format(
', '.join(filter_map.keys()), ', '.join(part_specs[0].keys())
)
)
candidates = [
p_dict[partition_key]
for p_dict in part_specs
if filter_map is None or all(item in p_dict.items() for item in filter_map.items())
]
if not candidates:
return None
else:
return max(candidates)
def max_partition(
self,
schema: str,
table_name: str,
field: Optional[str] = None,
filter_map: Optional[Dict[Any, Any]] = None,
) -> Any:
"""
Returns the maximum value for all partitions with given field in a table.
If only one partition key exist in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = {key.name for key in table.partitionKeys}
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys that are not partition key.")
part_names = client.get_partition_names(
schema, table_name, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
part_specs = [client.partition_name_to_spec(part_name) for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs, field, filter_map)
def table_exists(self, table_name: str, db: str = 'default') -> bool:
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception: # pylint: disable=broad-except
return False
def drop_partitions(self, table_name, part_vals, delete_data=False, db='default'):
"""
Drop partitions from the given table matching the part_vals input
:param table_name: table name.
:type table_name: str
:param part_vals: list of partition specs.
:type part_vals: list
:param delete_data: Setting to control if underlying data have to be deleted
in addition to dropping partitions.
:type delete_data: bool
:param db: Name of hive schema (database) @table belongs to
:type db: str
>>> hh = HiveMetastoreHook()
>>> hh.drop_partitions(db='airflow', table_name='static_babynames',
... part_vals=['2020-05-01'])
True
"""
if self.table_exists(table_name, db):
with self.metastore as client:
self.log.info(
"Dropping partition of table %s.%s matching the spec: %s", db, table_name, part_vals
)
return client.drop_partition(db, table_name, part_vals, delete_data)
else:
self.log.info("Table %s.%s does not exist!", db, table_name)
return False
class HiveServer2Hook(DbApiHook):
"""
Wrapper around the pyhive library
Notes:
* the default authMechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI
* the default for run_set_variable_statements is true, if you
are using impala you may need to set it to false in the
``extra`` of your connection in the UI
"""
conn_name_attr = 'hiveserver2_conn_id'
default_conn_name = 'hiveserver2_default'
supports_autocommit = False
def get_conn(self, schema: Optional[str] = None) -> Any:
"""Returns a Hive connection object."""
username: Optional[str] = None
# pylint: disable=no-member
db = self.get_connection(self.hiveserver2_conn_id) # type: ignore
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id, # type: ignore
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default',
)
# pylint: enable=no-member
def _get_results(
self,
hql: Union[str, Text, List[str]],
schema: str = 'default',
fetch_size: Optional[int] = None,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> Any:
from pyhive.exc import ProgrammingError
if isinstance(hql, str):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
# not all query services (e.g. impala AIRFLOW-4434) support the set command
# pylint: disable=no-member
db = self.get_connection(self.hiveserver2_conn_id) # type: ignore
# pylint: enable=no-member
if db.extra_dejson.get('run_set_variable_statements', True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute(f"set {k}={v}")
for statement in hql:
cur.execute(statement)
# we only get results of statements that return
lowered_statement = statement.lower().strip()
if (
lowered_statement.startswith('select')
or lowered_statement.startswith('with')
or lowered_statement.startswith('show')
or (lowered_statement.startswith('set') and '=' not in lowered_statement)
):
description = cur.description
if previous_description and previous_description != description:
message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(
repr(description), repr(previous_description)
)
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(
self,
hql: Union[str, Text],
schema: str = 'default',
fetch_size: Optional[int] = None,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> Dict[str, Any]:
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {'data': list(results_iter), 'header': header}
return results
def to_csv(
self,
hql: Union[str, Text],
csv_filepath: str,
schema: str = 'default',
delimiter: str = ',',
lineterminator: str = '\r\n',
output_header: bool = True,
fetch_size: int = 1000,
hive_conf: Optional[Dict[Any, Any]] = None,
) -> None:
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as file:
writer = csv.writer(file, delimiter=delimiter, lineterminator=lineterminator, encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(
self, hql: Union[str, Text], schema: str = 'default', hive_conf: Optional[Dict[Any, Any]] = None
) -> Any:
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
def get_pandas_df( # type: ignore
self,
hql: Union[str, Text],
schema: str = 'default',
hive_conf: Optional[Dict[Any, Any]] = None,
**kwargs,
) -> pandas.DataFrame:
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:param kwargs: (optional) passed into pandas.DataFrame constructor
:type kwargs: dict
:return: result of hive execution
:rtype: DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
:return: pandas.DataFrame
"""
res = self.get_results(hql, schema=schema, hive_conf=hive_conf)
df = pandas.DataFrame(res['data'], **kwargs)
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
paplorinc/intellij-community | python/helpers/pydev/pydev_ipython/matplotlibtools.py | 15 | 6107 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
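# For example (assuming a standard matplotlib install), 'TkAgg' appears in
# interactive_bk and 'Agg' in non_interactive_bk, so
# is_interactive_backend('TkAgg') -> True and is_interactive_backend('Agg') -> False.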
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
matplotlib.real_use = matplotlib.use
matplotlib.use = patched_use
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
matplotlib.real_is_interactive = matplotlib.is_interactive
matplotlib.is_interactive = patched_is_interactive
def _get_major_version(module):
return int(module.__version__.split('.')[0])
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
if not hasattr(matplotlib, 'rcParams'):
# matplotlib module wasn't fully imported, try later
return False
if _get_major_version(matplotlib) >= 3:
# since matplotlib 3.0, accessing `matplotlib.rcParams` lead to pyplot import,
# so we need to wait until necessary pyplot attributes will be imported as well
if 'matplotlib.pyplot' not in sys.modules:
return False
pyplot = sys.modules['matplotlib.pyplot']
if not hasattr(pyplot, 'switch_backend'):
return False
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
return True
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
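# Minimal usage sketch (illustrative only, not part of the original module):
#
#   def draw_if_interactive():
#       pass
#   draw_if_interactive = flag_calls(draw_if_interactive)
#   assert not draw_if_interactive.called
#   draw_if_interactive()
#   assert draw_if_interactive.called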
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
return True
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
return True
| apache-2.0 |
eladtan/white_dwarf_nova | radial_plot.py | 2 | 1420 | def main():
import h5py
import numpy
import matplotlib.pyplot as plt
fi = h5py.File('initial.h5', 'r')  # read-only handle; used below for the initial profiles
with h5py.File('final.h5','r+') as f:
raw = {}
for field in ['x_coordinate',
'y_coordinate',
'density',
'temperature',
'pressure',
'x_velocity',
'y_velocity',
'ghost']:
raw[field] = numpy.array(f[field])
raw['radius'] = numpy.sqrt(raw['x_coordinate']**2+
raw['y_coordinate']**2)
raw['r_velocity'] = ((
raw['x_velocity']*raw['x_coordinate']+
raw['y_velocity']*raw['y_coordinate'])/
raw['radius'])
mask = (raw['ghost']<0.5)
for n,field in enumerate(['density','pressure','temperature','y_velocity']):
plt.subplot(2,2,n+1)
initial_radius = numpy.sqrt(
numpy.array(fi['x_coordinate'])**2+
numpy.array(fi['y_coordinate'])**2)
plt.plot(initial_radius[mask],
numpy.array(fi[field])[mask],
'.')
plt.plot(raw['radius'][mask],
raw[field][mask],
'.')
plt.ylabel(field)
plt.show()
if __name__ == '__main__':
main()
| mit |
rahul-c1/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 43 | 3343 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually, in the feature space, a comparison of
the results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t variables with a low number of degrees of freedom
(top left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', linewidths=0, zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
iismd17/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
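# Illustrative usage (not part of the original tests): the helper returns a sparse CSC
# design matrix and a dense target, e.g.
#   X, y = make_sparse_data(n_samples=20, n_features=30)
#   X.shape == (20, 30), sp.issparse(X) is True, y.shape == (20,)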
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
paulcronk/psinsights | working-pagespeed2.py | 1 | 21838 | import pandas as pd
from psinsights import Service
# my Google API key
service = Service('AIzaSyA3FfXgcx1LF5wLNUjVrFB9ioJ8cQrRgkM')
# list of URLs
# url = pd.read_csv('pagespeed-pages.csv', encoding='utf-8')
url = [['https://www.gov.uk/national-minimum-wage-rates','answer','659310','2.602313863','1.864345121','0.080800259','5410'] ,['https://www.gov.uk/benefits-calculators','answer','579682','1.817202545','1.303122652','0.207799784','4636'] ,['https://www.gov.uk/state-pension-statement','answer','553982','1.299548217','0.828769003','0.047087204','4739'] ,['https://www.gov.uk/log-in-file-self-assessment-tax-return','answer','542504','1.257700302','0.790538384','0.055064242','4965'] ,['https://www.gov.uk/apply-apprenticeship','answer','473757','1.614386391','1.101233627','0.036795812','4203'] ,['https://www.gov.uk/contact-jobcentre-plus','answer','411175','2.52197478','1.693368049','0.091542866','3410'] ,['https://www.gov.uk/renew-adult-passport','answer','367061','1.944272489','1.212711987','0.05119179','3046'] ,['https://www.gov.uk/car-tax-disc-without-v11-reminder','answer','352777','1.834407658','1.214956595','0.117341472','2951'] ,['https://www.gov.uk/dbs-update-service','answer','298695','2.189723237','1.318467049','0.03589255','2793'] ,['https://www.gov.uk/apply-national-insurance-number','answer','290161','2.639798061','1.817138642','0.109350445','2476'] ,['https://www.gov.uk/browse/driving','browse','1078322','1.735348727','1.171895258','0.060171596','8207'] ,['https://www.gov.uk/browse/driving/car-tax-discs','browse','785064','2.259636031','1.392965201','0.067918415','6017'] ,['https://www.gov.uk/browse/driving/driving-licences','browse','719546','1.551090427','1.033780902','0.045290453','3959'] ,['https://www.gov.uk/browse/benefits','browse','560756','1.616491391','1.070204063','0.066103866','4298'] ,['https://www.gov.uk/browse/visas-immigration','browse','300102','1.394285897','0.861546114','0.045552519','2347'] ,['https://www.gov.uk/browse/benefits/tax-credits','browse','290666','2.08884772','1.450619556','0.082990683','2259'] ,['https://www.gov.uk/browse/working','browse','247633','1.374571942','0.895719794','0.036283805','1946'] ,['https://www.gov.uk/browse/driving/learning-to-drive','browse','246129','1.285296271','0.93968339','0.065025763','1475'] ,['https://www.gov.uk/browse/abroad/passports','browse','234574','1.5927428','1.037507713','0.050746479','1493'] ,['https://www.gov.uk/browse/tax','browse','233701','1.123299509','0.71208952','0.035849891','1833'] ,['https://www.gov.uk/start-up-loans','business_support','23378','1.735593301','1.312245192','0.041703349','209'] ,['https://www.gov.uk/child-benefit-tax-calculator/main','calculator','100060','0.95861118','0.647821118','0.039937888','805'] ,['https://www.gov.uk/child-benefit-tax-calculator','calculator','57194','1.175415909','0.848981818','0.090025','440'] ,['https://www.gov.uk/bank-holidays','calendar','814429','2.947756442','1.858846154','0.067771917','6791'] ,['https://www.gov.uk/when-do-the-clocks-change','calendar','11900','3.456198113','1.735801887','0.035811321','106'] ,['https://www.gov.uk/yourstatepension','campaign','234739','3.480980065','2.727734914','0.12223653','1856'] ,['https://www.gov.uk/statepensiontopup','campaign','33837','5.680011111','4.274977778','0.169514815','270'] ,['https://www.gov.uk/floodsdestroy','campaign','31796','8.493238683','6.145382716','0.107242798','243'] ,['https://www.gov.uk/vehicletaxrules','campaign','23775','3.724149733','2.952882353','0.020010695','187'] ,['https://www.gov.uk/done/vehicle-tax','completed_transaction','1425382','2.16046774','1.527164529','0.075622016','10183'] 
,['https://www.gov.uk/done/check-vehicle-tax','completed_transaction','356630','2.238557165','1.653729874','0.153767646','2624'] ,['https://www.gov.uk/driving-transaction-finished','completed_transaction','246843','2.377802372','1.421906343','0.147405352','2024'] ,['https://www.gov.uk/done/view-driving-licence','completed_transaction','240825','1.330058584','0.972969262','0.078002561','1963'] ,['https://www.gov.uk/done/make-a-sorn','completed_transaction','168843','2.374023383','1.695594848','0.090216237','1283'] ,['https://www.gov.uk/transaction-finished','completed_transaction','88642','1.905958146','1.312157186','0.053341317','669'] ,['https://www.gov.uk/done/book-driving-test','completed_transaction','82863','2.770414414','2.131402527','0.028516245','555'] ,['https://www.gov.uk/done/overseas-passports','completed_transaction','43101','2.274297619','1.509440476','0.046044643','336'] ,['https://www.gov.uk/done/change-date-practical-driving-test','completed_transaction','27533','2.202359606','1.654694581','0.098837438','203'] ,['https://www.gov.uk/done/marriage-allowance','completed_transaction','25089','1.880342105','0.932535088','0.028846491','228'] ,['https://www.gov.uk/guidance/hmrc-tools-and-calculators','detailed_guidance','89674','1.694723577','1.180822888','0.042138965','738'] ,['https://www.gov.uk/guidance/annual-tax-summary','detailed_guidance','81043','3.026364472','1.968769585','0.046956989','653'] ,['https://www.gov.uk/guidance/tax-professional-development-programme','detailed_guidance','75379','4.638122356','3.369554381','0.149279456','662'] ,['https://www.gov.uk/guidance/civil-service-fast-stream-graduate-schemes','detailed_guidance','74103','2.095938871','1.489315047','0.037909091','638'] ,['https://www.gov.uk/guidance/rates-of-vat-on-different-goods-and-services','detailed_guidance','66143','1.986718992','1.37448932','0.048963107','516'] ,['https://www.gov.uk/guidance/jobcentres-where-you-can-claim-universal-credit','detailed_guidance','63771','2.243862917','1.591653779','0.064609842','569'] ,['https://www.gov.uk/guidance/hmrc-online-services-for-agents','detailed_guidance','63697','0.732922018','0.494529002','0.010013921','436'] ,['https://www.gov.uk/guidance/civil-service-fast-stream-how-to-apply','detailed_guidance','63592','2.040547826','1.139676087','0.019854348','460'] ,['https://www.gov.uk/guidance/rates-and-thresholds-for-employers-2015-to-2016','detailed_guidance','55180','1.782179325','1.143632911','0.026415612','474'] ,['https://www.gov.uk/guidance/equality-act-2010-guidance','detailed_guidance','44030','2.184041872','1.443509852','0.02785468','406'] ,['https://www.gov.uk/business-finance-support-finder/search','finder','160127','1.113048832','0.750252654','0.104394904','471'] ,['https://www.gov.uk/business-finance-support-finder','finder','41967','1.616508772','1.139374269','0.033555556','342'] ,['https://www.gov.uk/licence-finder/sectors','finder','18040','0.863345794','0.526107477','0.054971963','214'] ,['https://www.gov.uk/licence-finder','finder','11763','2.250155172','1.530155172','0.011974138','116'] ,['https://www.gov.uk/log-in-register-hmrc-online-services','guide','1105909','0.907914613','0.605065872','0.017374808','11079'] ,['https://www.gov.uk/new-state-pension','guide','375771','0.991882245','0.623622468','0.062311234','3261'] ,['https://www.gov.uk/vehicle-tax-rate-tables','guide','332818','1.342384295','0.843072721','0.05621364','2878'] 
,['https://www.gov.uk/self-assessment-tax-returns','guide','298670','1.041300235','0.654159984','0.028718553','2548'] ,['https://www.gov.uk/income-tax-rates','guide','251031','1.242352336','0.845760636','0.051732243','2140'] ,['https://www.gov.uk/income-tax-rates/current-rates-and-allowances','guide','249455','2.314474085','1.548633682','0.065519734','2103'] ,['https://www.gov.uk/disclosure-barring-service-check/overview','guide','225875','2.391589692','1.641899342','0.066298938','1979'] ,['https://www.gov.uk/register-for-self-assessment','guide','199602','1.411126803','0.91472261','0.040750996','2011'] ,['https://www.gov.uk/new-state-pension/eligibility','guide','195523','1.053483262','0.639461023','0.057804507','1643'] ,['https://www.gov.uk/disclosure-barring-service-check/tracking-application-getting-certificate','guide','189818','1.986434286','1.269380023','0.1199214','1750'] ,['https://www.gov.uk/help/beta','help_page','48548','0.872476077','0.57157554','0.030539568','418'] ,['https://www.gov.uk/help','help_page','26858','1.215348754','0.780314286','0.034053381','281'] ,['https://www.gov.uk/help/browsers','help_page','26572','2.115023585','1.415843902','0.143741463','212'] ,['https://www.gov.uk/help/cookies','help_page','18324','1.789937984','1.246904762','0.043936508','129'] ,['https://www.gov.uk/','homepage','3834589','2.413297742','1.463053584','0.048877741','32555'] ,['https://www.gov.uk/shotgun-and-firearm-certificates','licence','30255','2.412229167','1.7114875','0.07715','240'] ,['https://www.gov.uk/waste-carrier-or-broker-registration','licence','19138','1.791174194','1.061845161','0.044090323','155'] ,['https://www.gov.uk/tv-licence','licence','18491','2.365462857','1.571988571','0.082245714','175'] ,['https://www.gov.uk/hazardous-waste-producer-registration','licence','13200','1.980877358','1.49614433','0.009226804','106'] ,['https://www.gov.uk/temporary-events-notice','licence','9490','1.419169811','0.987062893','0.060874214','159'] ,['https://www.gov.uk/premises-licence','licence','9239','1.41587156','1.130559633','0.043238532','109'] ,['https://www.gov.uk/pay-council-tax','local_transaction','108560','2.07687664','1.440164042','0.05387664','762'] ,['https://www.gov.uk/apply-for-elderly-person-bus-pass','local_transaction','86399','1.977504711','1.286635628','0.066774629','743'] ,['https://www.gov.uk/school-term-holiday-dates','local_transaction','68189','2.582669091','1.965723636','0.054129091','550'] ,['https://www.gov.uk/apply-council-tax-reduction','local_transaction','65838','1.695058712','1.0833125','0.094409091','528'] ,['https://www.gov.uk/apply-for-primary-school-place','local_transaction','65219','1.977235514','1.257418692','0.053108411','535'] ,['https://www.gov.uk/get-on-electoral-register','local_transaction','59672','1.149631356','0.86854661','0.034161017','472'] ,['https://www.gov.uk/apply-for-council-housing','local_transaction','53797','1.587685714','1.184171429','0.084784615','455'] ,['https://www.gov.uk/blue-badge-scheme-information-council','local_transaction','41368','2.404148688','1.737769679','0.062157434','343'] ,['https://www.gov.uk/apply-for-council-tax-discount','local_transaction','33482','1.286489879','0.794846154','0.047299595','247'] ,['https://www.gov.uk/apply-housing-benefit-from-council','local_transaction','30205','1.296875','0.957269767','0.065180556','216'] ,['https://www.gov.uk/government/news/driving-licence-changes','news','118341','6.19678198','1.62969265','0.054374165','899'] 
,['https://www.gov.uk/government/news/vehicle-tax-changes','news','73145','5.056795332','1.687448833','0.05902693','557'] ,['https://www.gov.uk/government/news/spending-review-and-autumn-statement-2015-key-announcements','news','72804','1.683813264','0.991699825','0.045066318','573'] ,['https://www.gov.uk/government/news/new-national-minimum-wage-rates-announced','news','62669','2.709677291','1.962265469','0.055722555','502'] ,['https://www.gov.uk/government/news/launch-of-the-new-companies-house-public-beta-service','news','57081','1.91729718','0.497655098','0.014331887','461'] ,['https://www.gov.uk/government/news/immigration-rules-changes','news','49269','1.692984169','1.198807388','0.05507124','379'] ,['https://www.gov.uk/government/news/uk-to-observe-a-minutes-silence-for-victims-of-the-paris-terrorist-attacks','news','44135','3.792754617','2.891662269','0.044448549','379'] ,['https://www.gov.uk/government/news/spending-review-and-autumn-statement-2015-everything-you-need-to-know','news','36342','0.952006472','0.59697411','0.024640777','309'] ,['https://www.gov.uk/government/news/transport-direct-website-closes-on-30-september-2014','news','33175','3.030919847','2.38835249','0.095141762','262'] ,['https://www.gov.uk/government/news/hiring-a-vehicle','news','31495','4.066705674','1.43372695','0.041460993','282'] ,['https://www.gov.uk/driving-theory-test-centre','place','162720','1.908049459','1.420468315','0.098295981','1294'] ,['https://www.gov.uk/number-plate-supplier','place','50458','1.026865116','0.74964186','0.042709302','430'] ,['https://www.gov.uk/find-regional-passport-office','place','41983','1.284501458','0.924820588','0.078629412','343'] ,['https://www.gov.uk/passport-interview-office','place','34223','1.505879518','1.104506024','0.07153012','249'] ,['https://www.gov.uk/compulsory-basic-training-cbt-courses','place','28013','3.32660479','1.876652695','0.01991018','167'] ,['https://www.gov.uk/health-protection-team','place','11792','0.931783784','0.606441441','0.01527027','111'] ,['https://www.gov.uk/find-atf-dvsa-test-station','place','11721','1.528632479','1.12962931','0.064784483','117'] ,['https://www.gov.uk/jobseekers-allowance/how-to-claim','programme','517121','2.067895394','1.256484552','0.054557321','4407'] ,['https://www.gov.uk/the-warm-home-discount-scheme/eligibility','programme','222238','1.472418678','1.05137723','0.061521511','1906'] ,['https://www.gov.uk/jobseekers-allowance/what-youll-get','programme','203573','1.588507303','1.00161439','0.062897753','1780'] ,['https://www.gov.uk/employment-support-allowance/how-to-claim','programme','195996','2.005792303','1.329552147','0.088635697','1637'] ,['https://www.gov.uk/jobseekers-allowance','programme','194322','1.415665867','0.823507212','0.055968788','1667'] ,['https://www.gov.uk/winter-fuel-payment/what-youll-get','programme','179816','1.746984095','1.120472462','0.104792303','1509'] ,['https://www.gov.uk/winter-fuel-payment/overview','programme','173779','3.097577056','2.025932133','0.109900277','1447'] ,['https://www.gov.uk/jobseekers-allowance/overview','programme','169642','2.804739529','1.951411649','0.066259817','1528'] ,['https://www.gov.uk/jobseekers-allowance/eligibility','programme','164853','1.766173116','1.118011549','0.093747454','1473'] ,['https://www.gov.uk/state-pension','programme','162477','1.337557034','0.853877693','0.104963878','1578'] ,['https://www.gov.uk/search','search','6511900','1.210784089','0.801749713','0.042228946','57644'] 
,['https://www.gov.uk/contact-the-dvla','simple_smart_answer','1410564','1.397849482','0.945512214','0.053394152','11394'] ,['https://www.gov.uk/sold-bought-vehicle','simple_smart_answer','1138271','0.931111489','0.600344308','0.028497916','9409'] ,['https://www.gov.uk/claim-state-pension-online','simple_smart_answer','443399','1.1561543','0.70765905','0.029102696','3895'] ,['https://www.gov.uk/qualify-tax-credits','simple_smart_answer','438506','1.034497655','0.732959238','0.056668915','3412'] ,['https://www.gov.uk/settle-in-the-uk','simple_smart_answer','369041','0.803511943','0.547294383','0.031545513','3098'] ,['https://www.gov.uk/check-if-you-need-a-tax-return','simple_smart_answer','350118','0.779019651','0.499903618','0.026344354','3206'] ,['https://www.gov.uk/vehicles-can-drive','simple_smart_answer','281331','1.232023063','0.79163238','0.035751384','2168'] ,['https://www.gov.uk/legal-right-work-uk','simple_smart_answer','234155','0.750825955','0.515023003','0.022628906','2304'] ,['https://www.gov.uk/exchange-foreign-driving-licence','simple_smart_answer','201185','1.036775655','0.747343886','0.027069869','1832'] ,['https://www.gov.uk/register-employer','simple_smart_answer','182258','0.768559476','0.522612','0.021017714','1757'] ,['https://www.gov.uk/calculate-state-pension','smart_answer','3370003','1.323673649','0.781799362','0.05688553','13844'] ,['https://www.gov.uk/calculate-your-holiday-entitlement','smart_answer','1739517','1.156100457','0.712383784','0.037452599','4818'] ,['https://www.gov.uk/calculate-your-child-maintenance','smart_answer','1612490','1.924017796','1.300467853','0.070649828','3484'] ,['https://www.gov.uk/pay-leave-for-parents','smart_answer','1019333','1.261407001','0.895759677','0.061127957','1914'] ,['https://www.gov.uk/calculate-your-redundancy-pay','smart_answer','859450','1.443340382','0.96114276','0.068161202','2932'] ,['https://www.gov.uk/student-finance-calculator','smart_answer','761446','1.287060683','0.902192794','0.032221871','1582'] ,['https://www.gov.uk/maternity-paternity-calculator','smart_answer','582730','0.9024375','0.547480783','0.026960116','1456'] ,['https://www.gov.uk/calculate-statutory-sick-pay','smart_answer','372199','1.247096012','0.828138848','0.036793205','677'] ,['https://www.gov.uk/am-i-getting-minimum-wage','smart_answer','339752','1.048715871','0.759760266','0.041628191','901'] ,['https://www.gov.uk/check-uk-visa','smart_answer','338491','1.24757376','0.841644696','0.029703704','1593'] ,['https://www.gov.uk/topic/personal-tax/self-assessment','specialist-sector','470817','1.237742525','0.740827424','0.024183486','4047'] ,['https://www.gov.uk/topic/business-tax/vat','specialist-sector','304506','1.125591655','0.753375716','0.013811963','2804'] ,['https://www.gov.uk/topic/further-education-skills/apprenticeships','specialist-sector','251697','2.166331426','1.482254658','0.037946991','2097'] ,['https://www.gov.uk/topic/business-tax/paye','specialist-sector','211271','1.215260504','0.741377438','0.018665788','1904'] ,['https://www.gov.uk/topic/personal-tax/income-tax','specialist-sector','169895','1.29112069','0.825151294','0.025229595','1508'] ,['https://www.gov.uk/topic/dealing-with-hmrc/paying-hmrc','specialist-sector','156343','1.551596518','0.991156893','0.030437928','1321'] ,['https://www.gov.uk/topic/dealing-with-hmrc/tax-agent-guidance','specialist-sector','99168','1.039654948','0.710200521','0.015571615','768'] 
,['https://www.gov.uk/topic/benefits-credits/tax-credits','specialist-sector','96240','1.547082383','1.013265228','0.030821066','789'] ,['https://www.gov.uk/topic/company-registration-filing/starting-company','specialist-sector','90810','1.328091969','0.724003891','0.023923476','772'] ,['https://www.gov.uk/topic/intellectual-property/trade-marks','specialist-sector','74541','0.930183511','0.57750266','0.00881516','752'] ,['https://www.gov.uk/jobsearch','transaction','4324633','3.198359068','1.888408722','0.075950826','39065'] ,['https://www.gov.uk/vehicle-tax','transaction','4114405','2.353487418','1.539265496','0.051599234','32984'] ,['https://www.gov.uk/get-information-about-a-company','transaction','2031722','0.95427395','0.567052993','0.014709628','19113'] ,['https://www.gov.uk/check-vehicle-tax','transaction','1787482','2.236185273','1.539040753','0.086508525','15210'] ,['https://www.gov.uk/pay-dartford-crossing-charge','transaction','1333059','2.656110519','1.854997236','0.054276596','10876'] ,['https://www.gov.uk/view-driving-licence','transaction','1204448','2.1289536','1.378514309','0.045585854','9806'] ,['https://www.gov.uk/register-to-vote','transaction','1155932','3.295604682','2.262534125','0.095845411','9013'] ,['https://www.gov.uk/change-driving-test','transaction','1145398','2.137030543','1.445087856','0.055782429','8218'] ,['https://www.gov.uk/get-vehicle-information-from-dvla','transaction','1020082','2.280651486','1.616397358','0.057676852','8717'] ,['https://www.gov.uk/student-finance-register-login','transaction','848938','2.377969539','1.535384431','0.080602313','7091'] ,['https://www.gov.uk/foreign-travel-advice','travel-advice','696235','1.323709716','0.904970226','0.037407287','5846'] ,['https://www.gov.uk/foreign-travel-advice/egypt','travel-advice','459565','2.697579807','1.831234647','0.097583378','3734'] ,['https://www.gov.uk/foreign-travel-advice/france','travel-advice','297766','1.857639094','1.277448244','0.059746765','2164'] ,['https://www.gov.uk/foreign-travel-advice/belgium','travel-advice','178485','1.900671063','1.297070652','0.066744763','1289'] ,['https://www.gov.uk/foreign-travel-advice/turkey','travel-advice','102138','2.422057572','1.720808511','0.080863579','799'] ,['https://www.gov.uk/foreign-travel-advice/morocco','travel-advice','82995','1.901528358','1.352938806','0.048067164','670'] ,['https://www.gov.uk/foreign-travel-advice/spain','travel-advice','61100','1.5977473','1.112637744','0.080967603','463'] ,['https://www.gov.uk/foreign-travel-advice/usa','travel-advice','59144','1.549398773','1.015507157','0.035492843','489'] ,['https://www.gov.uk/foreign-travel-advice/usa/entry-requirements','travel-advice','58369','1.788462168','1.227586912','0.071386503','489'] ,['https://www.gov.uk/foreign-travel-advice/tunisia','travel-advice','48475','2.117009828','1.496646192','0.059378378','407']]
# define dataframe
headings = ['URL', 'Format', 'Pageviews', 'Page load time', 'Interactive doc time', 'Download time', 'Sample size', 'Title', 'PageSpeed score', 'Total Resource count', 'Total Request Bytes', 'Static Resource count', 'CSS Resource count', 'CSS Response Bytes', 'Flash Response Bytes', 'Host Count', 'HTML Response Bytes', 'Image Response Bytes', 'JavaScript Resource Count', 'JavaScript Response Bytes', 'Text Response Bytes', 'Other Response Bytes']
table = []
speed = pd.DataFrame(columns=headings, data=table)
# loop through each url
for page in url:
analysis = service.analyze(page[0])
stats = analysis.statistics
# get data into list
newrow = [page[0], page[1], page[2], page[3], page[4], page[5], page[6],analysis.title, analysis.score, stats.resource_count,
stats.total_request_bytes, stats.static_resource_count,
stats.css_resource_count, stats.css_response_bytes,
stats.flash_response_bytes, stats.host_count,
stats.html_response_bytes, stats.image_response_bytes,
stats.javascript_resource_count,
stats.javascript_response_bytes, stats.text_response_bytes,
stats.other_response_bytes]
# turn data list into dataframe
newrowdf = pd.Series(newrow, index=headings)
# append dataframe to main dataframe
speed = speed.append(newrowdf, ignore_index=True)
print(speed)
speed.to_csv('pagespeed-results.csv',encoding='utf-8')
| apache-2.0 |
vshtanko/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have an full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
gizatt/director | src/python/ddapp/terrain.py | 6 | 3850 | from __future__ import division
import numpy as np
from scipy.spatial import ConvexHull
from ddapp.irisUtils import SafeTerrainRegion
from ddapp import transformUtils
from irispy.utils import sample_convex_polytope
import polyhedron._cdd
from polyhedron import Vrep, Hrep
from py_drake_utils.utils import rpy2rotmat
DEFAULT_FOOT_CONTACTS = np.array([[-0.13, -0.13, 0.13, 0.13],
[0.0562, -0.0562, 0.0562, -0.0562]])
DEFAULT_BOUNDING_BOX_WIDTH = 1
class PolygonSegmentationNonIRIS():
def __init__(self, polygon_vertices, bot_pts=DEFAULT_FOOT_CONTACTS,
bounding_box_width=DEFAULT_BOUNDING_BOX_WIDTH):
polygon_vertices = np.asarray(polygon_vertices)
        self.planar_polyhedron = Vrep(polygon_vertices[:2,:].T)
        self.bot_pts = bot_pts
        self.bounding_box_width = bounding_box_width
def getBoundingPolytope(self, start):
"""
Return A, b describing a bounding box on [x, y, yaw] into which the IRIS region must be contained.
The format is A [x;y;yaw] <= b
"""
start = np.array(start).reshape((3,))
lb = np.hstack((start[:2] - self.bounding_box_width / 2, start[2] - np.pi))
ub = np.hstack((start[:2] + self.bounding_box_width / 2, start[2] + np.pi))
A_bounds = np.vstack((-np.eye(3), np.eye(3)))
b_bounds = np.hstack((-lb, ub))
return A_bounds, b_bounds
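    # Illustrative helper (not part of the original class): check whether a given
    # [x, y, yaw] satisfies the A [x;y;yaw] <= b bounds returned above.
    def poseInBounds(self, pose_xyyaw, start):
        A_bounds, b_bounds = self.getBoundingPolytope(start)
        return bool(np.all(A_bounds.dot(np.asarray(pose_xyyaw)) <= b_bounds))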
def findSafeRegion(self, pose):
pose = np.asarray(pose)
tformForProjection = transformUtils.frameFromPositionAndRPY([0,0,0], pose[3:] * 180 / np.pi)
tform = transformUtils.frameFromPositionAndRPY(pose[:3], pose[3:] * 180 / np.pi)
contact_pts_on_plane = np.zeros((2, self.bot_pts.shape[1]))
for j in range(self.bot_pts.shape[1]):
contact_pts_on_plane[:,j] = tformForProjection.TransformPoint([self.bot_pts[0,j], self.bot_pts[1,j], 0])[:2]
Rdot = np.array([[0, -1], [1, 0]])
contact_vel_in_world = Rdot.dot(contact_pts_on_plane)
c_region = {'A': [], 'b': []}
for i in range(self.planar_polyhedron.A.shape[0]):
ai = self.planar_polyhedron.A[i,:]
n = np.linalg.norm(ai)
ai = ai / n
bi = self.planar_polyhedron.b[i] / n
p = ai.dot(contact_pts_on_plane)
v = ai.dot(contact_vel_in_world)
mask = np.logical_or(p >= 0, v >= 0)
for j, tf in enumerate(mask):
if tf:
c_region['A'].append(np.hstack((ai, v[j])))
c_region['b'].append([bi - p[j]])
A = np.vstack(c_region['A'])
b = np.hstack(c_region['b'])
b = b + A.dot(np.array([0,0,pose[5]]))
self.c_space_polyhedron = Hrep(A, b)
return SafeTerrainRegion(A, b, [], [], tform)
def drawSamples(self, nsamples):
import matplotlib.pyplot as plt
plt.figure(1)
plt.clf()
plt.hold(True)
k = ConvexHull(self.bot_pts.T).vertices
k = np.hstack((k, k[0]))
n = self.planar_polyhedron.generators.shape[0]
plt.plot(self.planar_polyhedron.generators.T[0,range(n) + [0]],
self.planar_polyhedron.generators.T[1,range(n) + [0]], 'r.-')
samples = sample_convex_polytope(self.c_space_polyhedron.A,
self.c_space_polyhedron.b,
500)
for i in range(samples.shape[1]):
R = np.array([[np.cos(samples[2,i]), -np.sin(samples[2,i])],
[np.sin(samples[2,i]), np.cos(samples[2,i])]])
V = R.dot(self.bot_pts[:,k])
V = V + samples[:2, i].reshape((2,1))
plt.plot(V[0,:], V[1,:], 'k-')
plt.show()
def get_point_and_normal(pose):
point = pose[:3]
normal = rpy2rotmat(pose[3:]).dot([0,0,1])
return point, normal
| bsd-3-clause |
Nyker510/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is by far the most
computationally costly approach.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularizaton parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/IPython/kernel/zmq/pylab/backend_inline.py | 2 | 8288 | """Produce SVG versions of active plots for display by the rich Qt frontend.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import matplotlib
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
# Local imports.
from IPython.config.configurable import SingletonConfigurable
from IPython.core.display import display
from IPython.core.displaypub import publish_display_data
from IPython.core.pylabtools import print_figure, select_figure_format
from IPython.utils.traitlets import Dict, Instance, CaselessStrEnum, Bool
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Configurable for inline backend options
#-----------------------------------------------------------------------------
# inherit from InlineBackendConfig for deprecation purposes
class InlineBackendConfig(SingletonConfigurable):
pass
class InlineBackend(InlineBackendConfig):
"""An object to store configuration of the inline backend."""
def _config_changed(self, name, old, new):
# warn on change of renamed config section
if new.InlineBackendConfig != old.InlineBackendConfig:
warn("InlineBackendConfig has been renamed to InlineBackend")
super(InlineBackend, self)._config_changed(name, old, new)
# The typical default figure size is too large for inline use,
# so we shrink the figure size to 6x4, and tweak fonts to
# make that fit.
rc = Dict({'figure.figsize': (6.0,4.0),
# play nicely with white background in the Qt and notebook frontend
'figure.facecolor': 'white',
'figure.edgecolor': 'white',
# 12pt labels get cutoff on 6x4 logplots, so use 10pt.
'font.size': 10,
# 72 dpi matches SVG/qtconsole
# this only affects PNG export, as SVG has no dpi setting
'savefig.dpi': 72,
# 10pt still needs a little more room on the xlabel:
'figure.subplot.bottom' : .125
}, config=True,
help="""Subset of matplotlib rcParams that should be different for the
inline backend."""
)
figure_format = CaselessStrEnum(['svg', 'png', 'retina'], default_value='png', config=True,
help="The image format for figures with the inline backend.")
def _figure_format_changed(self, name, old, new):
if self.shell is None:
return
else:
select_figure_format(self.shell, new)
close_figures = Bool(True, config=True,
help="""Close all figures at the end of each cell.
When True, ensures that each cell starts with no active figures, but it
also means that one must keep track of references in order to edit or
redraw figures in subsequent cells. This mode is ideal for the notebook,
where residual plots from other cells might be surprising.
When False, one must call figure() to create new figures. This means
that gcf() and getfigs() can reference figures created in other cells,
and the active figure can continue to be edited with pylab/pyplot
methods that reference the current active figure. This mode facilitates
iterative editing of figures, and behaves most consistently with
other matplotlib backends, but figure barriers between cells must
be explicit.
""")
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def show(close=None):
"""Show all figures as SVG/PNG payloads sent to the IPython clients.
Parameters
----------
close : bool, optional
If true, a ``plt.close('all')`` call is automatically issued after
sending all the figures. If this is set, the figures will entirely
removed from the internal list of figures.
"""
if close is None:
close = InlineBackend.instance().close_figures
try:
for figure_manager in Gcf.get_all_fig_managers():
display(figure_manager.canvas.figure)
finally:
show._to_draw = []
if close:
matplotlib.pyplot.close('all')
# This flag will be reset by draw_if_interactive when called
show._draw_called = False
# list of figures to draw when flush_figures is called
show._to_draw = []
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
# signal that the current active figure should be sent at the end of
# execution. Also sets the _draw_called flag, signaling that there will be
# something to send. At the end of the code execution, a separate call to
# flush_figures() will act upon these values
manager = Gcf.get_active()
if manager is None:
return
fig = manager.canvas.figure
    # Hack: matplotlib FigureManager objects in interactive backends (at least
# in some of them) monkeypatch the figure object and add a .show() method
# to it. This applies the same monkeypatch in order to support user code
# that might expect `.show()` to be part of the official API of figure
# objects.
# For further reference:
# https://github.com/ipython/ipython/issues/1612
# https://github.com/matplotlib/matplotlib/issues/835
if not hasattr(fig, 'show'):
# Queue up `fig` for display
fig.show = lambda *a: display(fig)
# If matplotlib was manually set to non-interactive mode, this function
# should be a no-op (otherwise we'll generate duplicate plots, since a user
# who set ioff() manually expects to make separate draw/show calls).
if not matplotlib.is_interactive():
return
# ensure current figure will be drawn, and each subsequent call
# of draw_if_interactive() moves the active figure to ensure it is
# drawn last
try:
show._to_draw.remove(fig)
except ValueError:
# ensure it only appears in the draw list once
pass
# Queue up the figure for drawing in next show() call
show._to_draw.append(fig)
show._draw_called = True
def flush_figures():
"""Send all figures that changed
This is meant to be called automatically and will call show() if, during
prior code execution, there had been any calls to draw_if_interactive.
This function is meant to be used as a post_execute callback in IPython,
so user-caused errors are handled with showtraceback() instead of being
allowed to raise. If this function is not called from within IPython,
then these exceptions will raise.
"""
if not show._draw_called:
return
if InlineBackend.instance().close_figures:
# ignore the tracking, just draw and close all figures
try:
return show(True)
except Exception as e:
# safely show traceback if in IPython, else raise
try:
get_ipython
except NameError:
raise e
else:
get_ipython().showtraceback()
return
try:
# exclude any figures that were closed:
active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])
for fig in [ fig for fig in show._to_draw if fig in active ]:
try:
display(fig)
except Exception as e:
# safely show traceback if in IPython, else raise
try:
get_ipython
except NameError:
raise e
else:
get_ipython().showtraceback()
break
finally:
# clear flags for next round
show._to_draw = []
show._draw_called = False
# Changes to matplotlib in version 1.2 requires a mpl backend to supply a default
# figurecanvas. This is set here to a Agg canvas
# See https://github.com/matplotlib/matplotlib/pull/1125
FigureCanvas = FigureCanvasAgg
| apache-2.0 |
hyflashstar/gupiao | src/交易策略模拟.py | 1 | 4692 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 15:05:53 2017
@author: 53771
"""
import loadStock as ls
import PairTrading as pairTrading
import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sz50s=ts.get_sz50s()
#sz50s=sz50s[0:2]
Close=pd.DataFrame()
#Close.index=c000001.index
for index,row in sz50s.iterrows():
data=ls.read_hit_data(row['code'])
#Close.index=data.index
Close[row['code']]=data['close']
formPeriod='2015-01-01:2016-01-01'
tradePeriod='2016-01-01:2017-01-01'
priceA=Close['601288']
priceB=Close['601398']
priceAf=priceA[formPeriod.split(':')[0]:formPeriod.split(':')[1]]
priceBf=priceB[formPeriod.split(':')[0]:formPeriod.split(':')[1]]
priceAt=priceA[tradePeriod.split(':')[0]:tradePeriod.split(':')[1]]
priceBt=priceB[tradePeriod.split(':')[0]:tradePeriod.split(':')[1]]
pt=pairTrading.PairTrading()
alpha,beta=pt.Cointegration(priceAf,priceBf)
spreadf=pt.CointegrationSpread(priceA,priceB,formPeriod,formPeriod)
mu=np.mean(spreadf)
sd=np.std(spreadf)
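# Spread over the trading period, using the cointegration relation estimated on the
# formation period: spread_t = log(P_B_t) - beta * log(P_A_t) - alpha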
CoSpreadT=np.log(priceBt)-beta*np.log(priceAt)-alpha
CoSpreadT.plot()
plt.title('Spread series over the trading period (cointegrated pair)')
plt.axhline(y=mu,color='black')
plt.axhline(y=mu+0.2*sd,color='blue',ls='-',lw=2)
plt.axhline(y=mu-0.2*sd,color='blue',ls='-',lw=2)
plt.axhline(y=mu+1.5*sd,color='green',ls='--',lw=2.5)
plt.axhline(y=mu-1.5*sd,color='green',ls='--',lw=2.5)
plt.axhline(y=mu+2.5*sd,color='red',ls="-.",lw=3)
plt.axhline(y=mu-2.5*sd,color='red',ls="-.",lw=3)
level=(float('-inf'),mu-2.5*sd,mu-1.5*sd,mu-0.2*sd,mu+0.2*sd,mu+1.5*sd,mu+2.5*sd,float('inf'))
prcLevel=pd.cut(CoSpreadT,level,labels=False)-3
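# prcLevel maps the spread into integer bands relative to the formation-period mean:
#    0  : within +/-0.2*sd (close band)
#  +/-1 : between 0.2*sd and 1.5*sd
#  +/-2 : between 1.5*sd and 2.5*sd (open band)
#  +/-3 : beyond 2.5*sd (stop-loss band)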
#prcLevel.plot()
def TradeSig(prcLevel):
n=len(prcLevel)
signal=np.zeros(n)
for i in range(1,n):
        if prcLevel[i-1]==1 and prcLevel[i]==2:  # crossed above the upper open threshold: open position
            signal[i]=1
        elif prcLevel[i-1]==-1 and prcLevel[i]==-2:  # crossed below the lower open threshold: open position
            signal[i]=-1
        elif prcLevel[i-1]==1 and prcLevel[i]<1:  # fell back below the upper close line: close position
            signal[i]=2
        elif prcLevel[i-1]==-1 and prcLevel[i]>-1:  # rose back above the lower close line: close position
            signal[i]=-2
        elif prcLevel[i-1]<=2 and prcLevel[i]>2:  # broke above the stop-loss band (relationship breaking down): close position
            signal[i]=3
        elif prcLevel[i-1]>=-2 and prcLevel[i]<-2:  # broke below the stop-loss band (relationship breaking down): close position
            signal[i]=-3
return(signal)
signal=TradeSig(prcLevel)
ns=len(signal)
position=[signal[0]]
for i in range(1,ns):
position.append(position[-1])
if signal[i]==1:
position[i]=1
elif signal[i]==-1:
position[i]=-1
elif signal[i]==2 and position[i-1]==1:
position[i]=0
elif signal[i]==-2 and position[i-1]==-1:
position[i]=0
elif signal[i]==3:
position[i]=0
elif signal[i]==-3:
position[i]=0
position=pd.Series(position,index=CoSpreadT.index)
# A-shares cannot be sold short, so we can only sell when a stock is overvalued and buy
# when it is undervalued; this requires the traded stocks to fluctuate around a stable value
def TradeSim(priceX,priceY,position):
n=len(position)
shareY=pd.Series(np.zeros(n),index=position.index)
shareX=pd.Series(np.zeros(n),index=position.index)
cash=[2000]
for i in range(1,n):
shareX[i]=(shareX[i-1])
shareY[i]=(shareY[i-1])
cash.append(cash[i-1])
        if position[i-1]==0 and position[i]==1:  # sell X, buy Y
            shareX[i]=0
            shareY[i]=(cash[i-1]+((shareX[i-1]-shareX[i])*priceX[i]))/priceY[i]
            cash[i]=cash[i-1]-(shareY[i]*priceY[i]+shareX[i]*priceX[i])
        elif position[i-1]==0 and position[i]==-1:  # buy X, sell Y
shareY[i]=0
shareX[i]=(cash[i-1]+((shareY[i-1]-shareY[i])*priceY[i]))/priceX[i]
cash[i]=cash[i-1]-(shareY[i]*priceY[i]+shareX[i]*priceX[i])
elif position[i-1]==1 and position[i]==0:
shareX[i]=0
shareY[i]=0
cash[i]=cash[i-1]+(shareY[i-1]*priceY[i]+shareX[i-1]*priceX[i])
elif position[i-1]==-1 and position[i]==0:
shareX[i]=0
shareY[i]=0
cash[i]=cash[i-1]+(shareY[i-1]*priceY[i]+shareX[i-1]*priceX[i])
cash=pd.Series(cash,index=position.index)
asset=cash+shareY*priceY+shareX*priceX
account=pd.DataFrame({'Position':position,'ShareY':shareY,'ShareX':shareX,
'Cash':cash,'Asset':asset})
return(account)
# Since A-shares cannot be sold short, redefine the trading strategy:
'''
1. Hold both stocks in a balanced 0.5 / 0.5 ratio.
2. When the spread moves significantly, adjust the positions and reduce the overvalued stock.
3. Once the spread returns to normal, restore the original position weights.
'''
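# A minimal sketch (hypothetical, not used below) of the long-only rebalancing idea
# described above: hold both stocks 50/50, tilt the weights away from the leg that the
# spread says is overvalued once it leaves the open band, and rebalance back to 50/50
# once the spread returns inside the close band. The 0.2/0.8 tilt weights are
# illustrative assumptions, not taken from the original strategy.
def TradeSimLongOnly(priceX, priceY, prcLevel, cash0=2000):
    weights = []
    w = 0.5  # weight of stock Y; stock X gets 1 - w
    for lvl in np.asarray(prcLevel):
        if lvl >= 2:      # Y overvalued relative to X: shift weight towards X
            w = 0.2
        elif lvl <= -2:   # Y undervalued relative to X: shift weight towards Y
            w = 0.8
        elif lvl == 0:    # spread back inside the +/-0.2*sd band: restore the balance
            w = 0.5
        weights.append(w)
    weights = pd.Series(weights, index=priceY.index)
    # portfolio value, assuming costless daily rebalancing to the target weights
    retX = priceX.pct_change().fillna(0)
    retY = priceY.pct_change().fillna(0)
    wY = weights.shift(1).fillna(0.5)
    port_ret = wY * retY + (1 - wY) * retX
    return cash0 * (1 + port_ret).cumprod()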
account=TradeSim(priceAt,priceBt,position)
account.iloc[:,[0,1,3,4]].plot(style=['--','-',':'])
| apache-2.0 |
appapantula/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
gautamkmr/incubator-mxnet | docs/mxdoc.py | 7 | 12702 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
import StringIO
import contextlib
# white list to evaluate the code block output, such as ['tutorials/gluon']
_EVAL_WHILTELIST = []
# start or end of a code block
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '#'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
def build_r_docs(app):
"""build r pdf"""
    r_root = app.builder.srcdir + '/../R-package'
    root_path = app.builder.srcdir + '/..'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
# scaldoc fails on some apis, so exit 0 to pass the check
_run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
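# Illustrative note (not part of the original file): a markdown table such as
#   | op  | arity |
#   | --- | ----- |
#   | add | 2     |
# is rewritten into an ```eval_rst list-table directive with one header row;
# any row whose column count differs from the header row makes the function
# give up and return ''.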
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
                    if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
    - lang: the code block language
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
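# Illustrative behaviour (not part of the original file): for the lines
# ['```python', 'x = 1', '```', 'plain text'] the iterator marks the fence
# lines and the body with in_code=True and lang='python', while 'plain text'
# comes back as (line, False, None, None). Only fences whose language name
# appears in _LANGS open a code block.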
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
def _get_mk_code_block(src, lang):
"""Return a markdown code block
E.g.
```python
import mxnet
    ```
"""
if lang is None:
lang = ''
return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n'
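# Illustrative example (not part of the original file):
#   _get_mk_code_block('import mxnet', 'python') == '```python\nimport mxnet\n```\n'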
@contextlib.contextmanager
def _string_io():
oldout = sys.stdout
olderr = sys.stderr
strio = StringIO.StringIO()
sys.stdout = strio
sys.stderr = strio
yield strio
sys.stdout = oldout
sys.stderr = olderr
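# Illustrative usage (not part of the original file): temporarily swap
# sys.stdout/sys.stderr for a StringIO buffer so printed output can be captured:
#   with _string_io() as s:
#       print('hello')
#   captured = s.getvalue()  # 'hello\n'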
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
def _get_jupyter_notebook(lang, lines):
cells = []
for in_code, blk_lang, lines in _get_blocks(lines):
if blk_lang != lang:
in_code = False
src = '\n'.join(lines)
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": src
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
    for in_code, blk_lang, lines in _get_blocks(lines):
        if blk_lang != lang:
            in_code = False
if in_code:
out.append('')
for l in lines:
if in_code:
if '%matplotlib' not in l:
out.append(l)
else:
if ('<div>' in l or '</div>' in l or
'<script>' in l or '</script>' in l or
'<!--' in l or '-->' in l or
'%matplotlib' in l ):
continue
out.append(cmt+l)
if in_code:
out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix
if lang == 'python':
ipynb += '.ipynb'
else:
ipynb += '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
f = ipynb.split('/')[-1]
btn += '<div class="download_btn"><a href="%s" download="%s">' \
'<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
local_dict = {}
global_dict = {}
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# # then add lang buttons
# for k,l in enumerate(lines):
# if _LANG_SELECTION_MARK in l:
# lines[k] = _get_lang_selection_btn(langs)
output = ''
for in_code, lang, lines in _get_blocks(lines):
src = '\n'.join(lines)+'\n'
if in_code:
output += _get_mk_code_block(src, lang)
if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]):
status, blk_out = _get_python_block_output(src, global_dict, local_dict)
if len(blk_out):
output += '<div class=\"cell-results-header\">Output:</div>\n\n'
output += _get_mk_code_block(blk_out, 'results')
else:
output += src
source[i] = output
# source[i] = '\n'.join(lines)
def setup(app):
app.connect("builder-inited", build_mxnet)
app.connect("builder-inited", generate_doxygen)
app.connect("builder-inited", build_scala_docs)
# skipped to build r, it requires to install latex, which is kinds of too heavy
# app.connect("builder-inited", build_r_docs)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
| apache-2.0 |
sangwook236/sangwook-library | python/test/language_processing/draw_character_distribution.py | 2 | 8108 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
sys.path.append('../../src')
import os, math
import numpy as np
import scipy.stats
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import cv2
def draw_ellipse_on_character():
if 'posix' == os.name:
font_base_dir_path = '/home/sangwook/work/font'
else:
font_base_dir_path = '/work/font'
font_dir_path = font_base_dir_path + '/kor'
font_type = font_dir_path + '/gulim.ttf'
font_index = 0
font_size = 32
text_offset = (0, 0)
draw_text_border, crop_text_area = False, False
font_color, bg_color = 255, 0
font = ImageFont.truetype(font=font_type, size=font_size, index=font_index)
import string
#text = string.ascii_letters
text = '가나다라마바사자차카타파하'
image_size = font.getsize(text)
#image_size = (math.ceil(len(text) * font_size * 1.1), math.ceil((text.count('\n') + 1) * font_size * 1.1))
img = Image.new(mode='L', size=image_size, color=bg_color)
draw = ImageDraw.Draw(img)
# Draws text.
draw.text(xy=text_offset, text=text, font=font, fill=font_color)
if draw_text_border or crop_text_area:
#text_size = font.getsize(text) # (width, height). This is erroneous for multiline text.
text_size = draw.textsize(text, font=font) # (width, height).
font_offset = font.getoffset(text) # (x, y).
text_rect = (text_offset[0], text_offset[1], text_offset[0] + text_size[0] + font_offset[0], text_offset[1] + text_size[1] + font_offset[1])
# Draws a rectangle surrounding text.
if draw_text_border:
draw.rectangle(text_rect, outline='red', width=5)
# Crops text area.
if crop_text_area:
img = img.crop(text_rect)
rgb = cv2.cvtColor(np.array(img), cv2.COLOR_GRAY2BGR)
offset = np.array(text_offset)
for ch in text:
#ch_size = font.getsize(ch) # (width, height). This is erroneous for multiline text.
ch_size = draw.textsize(ch, font=font) # (width, height).
font_offset = font.getoffset(ch) # (x, y).
text_rect = (offset[0], offset[1], offset[0] + ch_size[0] + font_offset[0], offset[1] + ch_size[1] + font_offset[1])
if False:
center = (text_rect[0] + text_rect[2]) / 2, (text_rect[1] + text_rect[3]) / 2
axis = (text_rect[2] - text_rect[0], text_rect[3] - text_rect[1])
cv2.ellipse(rgb, (center, axis, 0), (0, 0, 255), 1, cv2.LINE_AA)
elif False:
pts = cv2.findNonZero(np.array(img)[text_rect[1]:text_rect[3],text_rect[0]:text_rect[2]]) + offset
obb = cv2.minAreaRect(pts)
cv2.ellipse(rgb, obb, (0, 0, 255), 1, cv2.LINE_AA)
elif True:
try:
pts = cv2.findNonZero(np.array(img)[text_rect[1]:text_rect[3],text_rect[0]:text_rect[2]])
pts = np.squeeze(pts, axis=1)
center = np.mean(pts, axis=0)
size = np.max(pts, axis=0) - np.min(pts, axis=0)
pts = pts - center # Centering.
u, s, vh = np.linalg.svd(pts, full_matrices=True)
angle = math.degrees(math.atan2(vh[0,1], vh[0,0]))
#obb = (center + offset, s * max(size) / max(s), angle)
obb = (center + offset, s * math.sqrt((size[0] * size[0] + size[1] * size[1]) / (s[0] * s[0] + s[1] * s[1])), angle)
cv2.ellipse(rgb, obb, (0, 255, 0), 1, cv2.LINE_AA)
except np.linalg.LinAlgError:
print('np.linalg.LinAlgError raised.')
raise
offset[0] = text_rect[2]
cv2.imshow('Ellipse', rgb)
cv2.imshow('Text', np.array(img))
cv2.waitKey(0)
cv2.destroyAllWindows()
def draw_normal_distribution_on_character():
if 'posix' == os.name:
font_base_dir_path = '/home/sangwook/work/font'
else:
font_base_dir_path = '/work/font'
font_dir_path = font_base_dir_path + '/kor'
font_type = font_dir_path + '/gulim.ttf'
font_index = 0
font_size = 32
font_color, bg_color = 255, 0
text_offset = (0, 0)
draw_text_border, crop_text_area = False, False
font = ImageFont.truetype(font=font_type, size=font_size, index=font_index)
import string
text = string.ascii_letters
#text = '가나다라마바사자차카타파하'
image_size = font.getsize(text) # (width, height). This is erroneous for multiline text.
#image_size = (math.ceil(len(text) * font_size * 1.1), math.ceil((text.count('\n') + 1) * font_size * 1.1))
img = Image.new(mode='L', size=image_size, color=bg_color)
draw = ImageDraw.Draw(img)
# Draws text.
draw.text(xy=text_offset, text=text, font=font, fill=font_color)
if draw_text_border or crop_text_area:
#text_size = font.getsize(text) # (width, height). This is erroneous for multiline text.
text_size = draw.textsize(text, font=font) # (width, height).
font_offset = font.getoffset(text) # (x, y).
text_rect = (text_offset[0], text_offset[1], text_offset[0] + text_size[0] + font_offset[0], text_offset[1] + text_size[1] + font_offset[1])
# Draws a rectangle surrounding text.
if draw_text_border:
draw.rectangle(text_rect, outline='red', width=5)
# Crops text area.
if crop_text_area:
img = img.crop(text_rect)
#x, y = np.mgrid[0:img.size[0], 0:img.size[1]]
x, y = np.mgrid[0:img.size[0]:0.5, 0:img.size[1]:0.5]
pos = np.dstack((x, y))
text_pdf_unnormalized = np.zeros(x.shape, dtype=np.float32)
offset = np.array(text_offset)
for ch in text:
#char_size = font.getsize(ch) # (width, height). This is erroneous for multiline text.
char_size = draw.textsize(ch, font=font) # (width, height).
font_offset = font.getoffset(ch) # (x, y).
text_rect = (offset[0], offset[1], offset[0] + char_size[0] + font_offset[0], offset[1] + char_size[1] + font_offset[1])
if True:
pts = cv2.findNonZero(np.array(img)[text_rect[1]:text_rect[3],text_rect[0]:text_rect[2]]) + offset
center, axis, angle = cv2.minAreaRect(pts)
angle = math.radians(angle)
elif False:
try:
pts = cv2.findNonZero(np.array(img)[text_rect[1]:text_rect[3],text_rect[0]:text_rect[2]])
pts = np.squeeze(pts, axis=1)
center = np.mean(pts, axis=0)
size = np.max(pts, axis=0) - np.min(pts, axis=0)
pts = pts - center # Centering.
u, s, vh = np.linalg.svd(pts, full_matrices=True)
center = center + offset
#axis = s * max(size) / max(s)
axis = s * math.sqrt((size[0] * size[0] + size[1] * size[1]) / (s[0] * s[0] + s[1] * s[1]))
angle = math.atan2(vh[0,1], vh[0,0])
except np.linalg.LinAlgError:
print('np.linalg.LinAlgError raised.')
raise
cos_theta, sin_theta = math.cos(angle), math.sin(angle)
R = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])
# TODO [decide] >> Which one is better?
if True:
cov = np.diag(np.array(axis)) # 1 * sigma.
else:
cov = np.diag(np.array(axis) * 2) # 2 * sigma.
cov = np.matmul(R, np.matmul(cov, R.T))
rv = scipy.stats.multivariate_normal(center, cov)
# TODO [decide] >> Which one is better?
if False:
text_pdf_unnormalized += rv.pdf(pos)
else:
char_pdf = rv.pdf(pos)
text_pdf_unnormalized += char_pdf / np.max(char_pdf)
offset[0] = text_rect[2]
rotation_angle = 22.5
text_pdf_unnormalized = np.array(Image.fromarray(text_pdf_unnormalized.T).rotate(rotation_angle, expand=1)).T
img = img.rotate(rotation_angle, expand=1)
img = img.resize(text_pdf_unnormalized.shape, resample=Image.BICUBIC)
#x, y = np.mgrid[0:img.size[0], 0:img.size[1]]
#pos = np.dstack((x, y))
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax1.imshow(img, cmap='gray', aspect='equal')
#ax2.contourf(x, y, text_pdf_unnormalized, cmap='Reds')
#ax2.set_aspect('equal')
ax2.imshow(text_pdf_unnormalized.T, cmap='Reds', aspect='equal')
#text_pdf_blended = 0.5 * text_pdf_unnormalized + 0.5 * np.array(img).T / 255
text_pdf_blended = 0.5 * text_pdf_unnormalized / np.max(text_pdf_unnormalized) + 0.5 * np.array(img).T / 255
#ax3.contourf(x, y, text_pdf_blended, cmap='gray')
#ax3.set_aspect('equal')
ax3.imshow(text_pdf_blended.T, cmap='gray', aspect='equal')
plt.show()
def transform_ellipse_projectively():
raise NotImplementedError
def main():
#draw_ellipse_on_character()
draw_normal_distribution_on_character()
#transform_ellipse_projectively() # Not yet implemented.
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
sebi06/BioFormatsRead | showZsurface.py | 1 | 2644 | # -*- coding: utf-8 -*-
"""
@author: Sebi
File: showZsurface.py
Date: 01.02.2019
Version. 0.4
"""
import bftools as bf
import matplotlib.pyplot as plt
import argparse
import sys
import os
# setup commandline parameters
parser = argparse.ArgumentParser(description='Read Filename and Parameters.')
parser.add_argument('-file', action="store", dest='filename')
parser.add_argument('-csv', action="store", dest='writecsv')
parser.add_argument('-sep', action="store", dest='separator')
parser.add_argument('-save', action="store", dest='savefigure')
parser.add_argument('-show', action="store", dest='showsurface')
parser.add_argument('-format', action="store", dest='saveformat')
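# Example invocation (illustrative; the CZI filename is a placeholder):
#   python showZsurface.py -file image.czi -csv True -sep comma -save False -show True -format png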
# get the arguments
args = parser.parse_args()
# get the filename
filenameczi = args.filename
saveformat = args.saveformat
# get separator
separator = args.separator
if args.separator == 'tab':
separator = '\t'
elif args.separator == 'comma':
separator = ','
elif args.separator == 'semicolon':
separator = ';'
print('Write CSV Option : ', args.writecsv)
print('Separator : ', args.separator)
# get CSV write option
if args.writecsv == 'True':
wcsv = True
elif args.writecsv == 'False':
wcsv = False
# get save option
if args.savefigure == 'True':
save = True
elif args.savefigure == 'False':
save = False
# get show surface options
if args.showsurface == 'True':
surface = True
elif args.showsurface == 'False':
surface = False
# specify bioformats_package.jar to use if required
# Attention: for larger CZI tile images containing an image pyramid one must still use 5.1.10
# since the latest version is not fully supported by python-bioformats yet
bfpackage = r'bfpackage/5.1.10/bioformats_package.jar'
bf.set_bfpath(bfpackage)
# create plane info from CZI image file and write CSV file (optional)
planetable, filenamecsv = bf.get_planetable(filenameczi,
writecsv=wcsv,
separator=separator)
# show the dataframe
print(planetable[:10])
# define name for figure to be saved
figuresavename = os.path.splitext(filenamecsv)[0] + '_XYZ-Pos' + '.' + saveformat
# display the XYZ positions
fig1, fig2 = bf.scatterplot(planetable,
ImageID=0,
T=0,
CH=0,
Z=0,
size=250,
savefigure=save,
figsavename=figuresavename,
showsurface=surface)
# show the plot
plt.show()
print('Exiting ...')
os._exit(42)
| bsd-2-clause |
rueberger/MJHMC | mjhmc/misc/mixing.py | 1 | 4915 | import numpy as np
from scipy.linalg import eig
from mjhmc.samplers.algebraic_hmc import AlgebraicDiscrete, AlgebraicContinuous
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from exceptions import RuntimeError
def get_eigs(sampler, order, steps=1000, energies=None):
"""Runs the sampler, returns the l1 normalized eigs
"""
hmc = sampler(order, energies=energies)
for _ in xrange(steps):
hmc.sampling_iteration()
t = hmc.get_transition_matrix()
return eig(t, left=True, right=False)
def mixing_times(H, trials=10):
"""runs the two samplers with the given energy a bunch of times
reports back their average mixing times
"""
order = len(H) * 2
c_tm = np.zeros(trials)
d_tm = np.zeros(trials)
for i in xrange(trials):
# todo: add reset methods
print "trial: {}".format(i)
hmc = AlgebraicDiscrete(order, energies=H)
chmc = AlgebraicContinuous(order, energies=H)
d_tm[i] = hmc.calculate_mixing_time()
c_tm[i] = chmc.calculate_mixing_time()
print "Average mixing time for discrete sampler: {}".format(np.mean(d_tm))
print "Average mixing time for continuous sampler: {}".format(np.mean(c_tm))
def test_sampler(sampler, H, steps=1000):
"""Runs the sampler on the given energy
Prints a bunch of statistics about how well it's doing
returns t_obs, distr_obs
"""
order = len(H) * 2
smp = sampler(order, energies=H)
smp.sample(steps)
t_obs = smp.get_transition_matrix()
print "Predicted distribution: {} \n".format(smp.prd_distr)
print "Observed distribution: {} \n".format(smp.get_distr())
print "Sampling error (L1): {} \n".format(smp.sampling_err())
print "Observed transition matrix: \n {} \n".format(t_obs)
print "Eigenspectrum of observed transition matrix: \n"
eigs = rectify_evecs(eig(t_obs, left=True, right=False))
pprint_eigs(eigs)
return t_obs, smp.get_distr()
def pprint_eigs(eigs):
"""eigs: output of linalg.eig
pretty prints the results
"""
for l, vec in zip(eigs[0], eigs[1]):
print "Eigenvalue: {} \n".format(l)
print "Eigenvector: {} \n".format(list(vec))
def rectify_evecs(eigs):
"""
eigs: output of linalg.eig
normalizes evecs by L1 norm, truncates small complex components,
ensures things are positive
"""
evecs = eigs[1].T
l1_norm = np.abs(evecs).sum(axis=1)
norm_evecs = evecs / l1_norm[:, np.newaxis]
real_evals = [np.around(np.real_if_close(l), decimals=5) for l in eigs[0]]
real_evecs = []
for v in norm_evecs:
real_v = np.real_if_close(v)
if (real_v < 0).all():
real_v *= -1
real_evecs.append(real_v)
# skip sorting for now: argsort is pain because numpy will typecase to complex arr
# desc_idx = np.argsort(real_evals)[::-1]
# return real_evals[desc_idx], real_evecs[desc_idx]
return real_evals, real_evecs
def calc_spectral_gaps(order, trials=1, n_sample_step=1000):
"""Approximates the spectral gap for each sampler at a certain order
returns avg_discrete_sg, discrete_sg_var, avg_continuous_sg, continuous_sg_var
"""
assert order % 2 == 0
# normally distributed?
H = np.random.randn(order / 2)
c_sg = np.zeros(trials)
h_sg = np.zeros(trials)
print "Order: {}".format(order)
for i in xrange(trials):
hmc = AlgebraicDiscrete(order, energies=H)
chmc = AlgebraicContinuous(order, energies=H)
# runs until close to equilibrium distribution
n_hmc = hmc.calculate_mixing_time()
n_chmc = chmc.calculate_mixing_time()
h_sg[i] = sg(hmc)
c_sg[i] = sg(chmc)
print "{} samplings steps for hmc to approach equilibirium".format(n_hmc)
print "{} samplings steps for chmc to approach equilibirium".format(n_chmc)
return np.mean(h_sg), np.std(h_sg), np.mean(c_sg), np.std(c_sg)
def sg(sampler):
"""returns the spectral gap
t: transition matrix
"""
while True:
try:
t = sampler.get_empirical_transition_matrix()
w,v = eig(t)
w_ord = np.sort(w)[::-1]
if np.around(np.real_if_close(w_ord[0]), decimals=5) != 1:
raise Exception("no eval with value 1")
return 1 - np.absolute(w_ord[1])
except RuntimeError:
sampler.sample(1000)
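# Illustrative check (not part of the original file): for a 2-state chain with
# transition matrix [[0.9, 0.1], [0.1, 0.9]] the eigenvalues are 1.0 and 0.8,
# so the quantity computed here (1 minus the modulus of the second-largest
# eigenvalue) is 1 - 0.8 = 0.2.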
def plot_sgs(max_ord=100):
"""Saves a plot of spectral gap against order
"""
plt.clf()
plt.ion()
orders = np.arange(2, max_ord) * 2
sgs = [calc_spectral_gaps(o) for o in orders]
avg_h_sg, std_h_sg, avg_c_sg, std_c_sg = zip(*sgs)
plt.errorbar(orders, avg_h_sg, yerr=std_h_sg, label='Discrete sampler')
plt.errorbar(orders, avg_c_sg, yerr=std_c_sg, label='Continuous sampler')
plt.title("Spectral gaps on random gaussian state ladders")
plt.legend()
| gpl-2.0 |
ajdawson/colormaps | lib/colormaps/__init__.py | 1 | 1731 | """Colormap generation for matplotlib."""
# Copyright (c) 2012 Andrew Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from .colormaps import (create_colormap,
register_colormap_base,
list_colormap_bases,
get_colormap_base_names,
get_colormap_base,
show_colormap,
ColormapBase,)
__all__ = ['create_colormap',
'register_colormap_base',
'list_colormap_bases',
'get_colormap_base_names',
'get_colormap_base',
'show_colormap',
'ColormapBase', ]
__version__ = '1.0.x'
| mit |
pnedunuri/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
MoamerEncsConcordiaCa/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
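# Illustrative example (not part of the original file): for a plain (non-dict)
# dataset with x_shape=(100, 8), y_shape=(100,), n_classes=3 and batch_size=32,
# this returns input_shape=[32, 8], output_shape=[32, 3] and batch_size=32.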
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or the same type as y. In case
      `y` is `dict` (or an iterable which returns dict), `n_classes[key]` is
      the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
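# Illustrative example (not part of the original file): with a (10, 5) numpy
# array and batch_size=4 this returns a list of three batches with 4, 4 and 2
# rows; with batch_size=None the whole array comes back as a single batch.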
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
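# Illustrative example (not part of the original file):
#   check_array([1, 2, 3], dtype=np.float32)  # -> array([1., 2., 3.], dtype=float32)
# Inputs that are neither ndarrays nor lists (e.g. h5py datasets) are returned as-is.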
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
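# Illustrative example (not part of the original file):
#   _access(np.array([10, 20, 30]), [0, 2])  # -> array([10, 30])
# For pandas Series/DataFrame inputs the same call is routed through .iloc.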
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can either be an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder reads data as it comes in from disk or somewhere
  else. It is common to have these iterators rotate infinitely over the
  dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set
`None`, then assumes that iterator to return already batched element.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate so use a
int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
huzq/scikit-learn | sklearn/ensemble/_gb.py | 2 | 68941 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from abc import ABCMeta
from abc import abstractmethod
import warnings
from ._base import BaseEnsemble
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..base import BaseEstimator
from ..base import is_classifier
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..model_selection import train_test_split
from ..tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, DOUBLE
from . import _gb_losses
from ..utils import check_random_state
from ..utils import check_array
from ..utils import column_or_1d
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ..utils.validation import _deprecate_positional_args
class VerboseReporter:
"""Reports verbose output to stdout.
Parameters
----------
verbose : int
Verbosity level. If ``verbose==1`` output is printed once in a while
(when iteration mod verbose_mod is zero).; if larger than 1 then output
is printed for each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
"""Initialize reporter
Parameters
----------
est : Estimator
The estimator
begin_at_stage : int, default=0
stage at which to begin reporting
"""
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration.
Parameters
----------
j : int
The new iteration
est : Estimator
The estimator
"""
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
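# Note (illustrative, not part of the original file): with ``verbose=1`` the
# reporter prints every iteration for the first 10 iterations, then every 10th
# iteration up to 100, then every 100th, and so on; with ``verbose > 1`` every
# iteration is printed.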
class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, *, loss, learning_rate, n_estimators, criterion,
min_samples_split, min_samples_leaf, min_weight_fraction_leaf,
max_depth, min_impurity_decrease, min_impurity_split,
init, subsample, max_features, ccp_alpha,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, validation_fraction=0.1,
n_iter_no_change=None, tol=1e-4):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.ccp_alpha = ccp_alpha
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
def _fit_stage(self, i, X, y, raw_predictions, sample_weight, sample_mask,
random_state, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == bool
loss = self.loss_
original_y = y
# Need to pass a copy of raw_predictions to negative_gradient()
# because raw_predictions is partially updated at the end of the loop
# in update_terminal_regions(), and gradients need to be evaluated at
# iteration i - 1.
raw_predictions_copy = raw_predictions.copy()
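        # loss.K trees are fit per stage: one per class for multi-class, otherwise one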
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, raw_predictions_copy, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
min_impurity_split=self.min_impurity_split,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
ccp_alpha=self.ccp_alpha)
if self.subsample < 1.0:
# no inplace multiplication!
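                # out-of-bag samples get zero weight so the tree fits only the in-bag subsample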
sample_weight = sample_weight * sample_mask.astype(np.float64)
X = X_csr if X_csr is not None else X
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(
tree.tree_, X, y, residual, raw_predictions, sample_weight,
sample_mask, learning_rate=self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return raw_predictions
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in _gb_losses.LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (_gb_losses.MultinomialDeviance
if len(self.classes_) > 2
else _gb_losses.BinomialDeviance)
else:
loss_class = _gb_losses.LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
# init must be an estimator or 'zero'
if isinstance(self.init, BaseEstimator):
self.loss_.check_init_estimator(self.init)
elif not (isinstance(self.init, str) and self.init == 'zero'):
raise ValueError(
"The init parameter must be an estimator or 'zero'. "
"Got init={}".format(self.init)
)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, str):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
# is regression
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features *
self.n_features_), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
if not isinstance(self.n_iter_no_change,
(numbers.Integral, type(None))):
raise ValueError("n_iter_no_change should either be None or an "
"integer. %r was passed"
% self.n_iter_no_change)
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
self.init_ = self.init
if self.init_ is None:
self.init_ = self.loss_.init_estimator()
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
if hasattr(self, '_rng'):
del self._rng
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_ = np.resize(self.estimators_,
(total_n_estimators, self.loss_.K))
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = np.resize(self.oob_improvement_,
total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
check_is_fitted(self)
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
y : array-like of shape (n_samples,)
Target values (strings or integers in classification, real numbers
in regression)
For classification, labels must correspond to classes.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, default=None
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
# Since check_array converts both X and y to the same dtype, but the
# trees use different types for X and y, checking them separately.
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],
dtype=DTYPE, multi_output=True)
n_samples, self.n_features_ = X.shape
sample_weight_is_none = sample_weight is None
sample_weight = _check_sample_weight(sample_weight, X)
y = column_or_1d(y, warn=True)
y = self._validate_y(y, sample_weight)
if self.n_iter_no_change is not None:
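            # hold out validation_fraction of the training data to monitor early stopping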
stratify = y if is_classifier(self) else None
X, X_val, y, y_val, sample_weight, sample_weight_val = (
train_test_split(X, y, sample_weight,
random_state=self.random_state,
test_size=self.validation_fraction,
stratify=stratify))
if is_classifier(self):
if self.n_classes_ != np.unique(y).shape[0]:
# We choose to error here. The problem is that the init
# estimator would be trained on y, which has some missing
# classes now, so its predictions would not have the
# correct shape.
raise ValueError(
'The training data after the early stopping split '
'is missing some classes. Try using another random '
'seed.'
)
else:
X_val = y_val = sample_weight_val = None
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model and initialize raw predictions
if self.init_ == 'zero':
raw_predictions = np.zeros(shape=(X.shape[0], self.loss_.K),
dtype=np.float64)
else:
# XXX clean this once we have a support_sample_weight tag
if sample_weight_is_none:
self.init_.fit(X, y)
else:
msg = ("The initial estimator {} does not support sample "
"weights.".format(self.init_.__class__.__name__))
try:
self.init_.fit(X, y, sample_weight=sample_weight)
except TypeError: # regular estimator without SW support
raise ValueError(msg)
except ValueError as e:
if "pass parameters to specific steps of "\
"your pipeline using the "\
"stepname__parameter" in str(e): # pipeline
raise ValueError(msg) from e
else: # regular estimator whose input checking failed
raise
raw_predictions = \
self.loss_.get_init_raw_predictions(X, self.init_)
begin_at_stage = 0
# The rng state must be preserved if warm_start is True
self._rng = check_random_state(self.random_state)
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
# The requirements of _decision_function (called in two lines
# below) are more constrained than fit. It accepts only CSR
# matrices.
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
raw_predictions = self._raw_predict(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(
X, y, raw_predictions, sample_weight, self._rng, X_val, y_val,
sample_weight_val, begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.n_estimators_ = n_stages
return self
def _fit_stages(self, X, y, raw_predictions, sample_weight, random_state,
X_val, y_val, sample_weight_val,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=bool)
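        # number of in-bag samples drawn for each stage when subsampling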
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
if self.verbose:
verbose_reporter = VerboseReporter(verbose=self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.n_iter_no_change is not None:
loss_history = np.full(self.n_iter_no_change, np.inf)
# We create a generator to get the predictions for X_val after
# the addition of each successive stage
y_val_pred_iter = self._staged_raw_predict(X_val)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
raw_predictions[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
raw_predictions = self._fit_stage(
i, X, y, raw_predictions, sample_weight, sample_mask,
random_state, X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
raw_predictions[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
raw_predictions[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, raw_predictions, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
# We also provide an early stopping based on the score from
# validation set (X_val, y_val), if n_iter_no_change is set
if self.n_iter_no_change is not None:
# By calling next(y_val_pred_iter), we get the predictions
# for X_val after the addition of the current stage
validation_loss = loss_(y_val, next(y_val_pred_iter),
sample_weight_val)
# Require validation_score to be better (less) than at least
# one of the last n_iter_no_change evaluations
if np.any(validation_loss + self.tol < loss_history):
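                    # overwrite the oldest entry; loss_history acts as a circular buffer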
loss_history[i % len(loss_history)] = validation_loss
else:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _raw_predict_init(self, X):
"""Check input and compute raw predictions of the init estimator."""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features_, X.shape[1]))
if self.init_ == 'zero':
raw_predictions = np.zeros(shape=(X.shape[0], self.loss_.K),
dtype=np.float64)
else:
raw_predictions = self.loss_.get_init_raw_predictions(
X, self.init_).astype(np.float64)
return raw_predictions
def _raw_predict(self, X):
"""Return the sum of the trees raw predictions (+ init estimator)."""
raw_predictions = self._raw_predict_init(X)
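        # predict_stages accumulates the learning_rate-scaled tree predictions in place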
predict_stages(self.estimators_, X, self.learning_rate,
raw_predictions)
return raw_predictions
def _staged_raw_predict(self, X):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
raw_predictions : generator of ndarray of shape (n_samples, k)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
raw_predictions = self._raw_predict_init(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate,
raw_predictions)
yield raw_predictions.copy()
@property
def feature_importances_(self):
"""The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : array, shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
"""
self._check_initialized()
relevant_trees = [tree
for stage in self.estimators_ for tree in stage
if tree.tree_.node_count > 1]
if not relevant_trees:
# degenerate case where all trees have only one node
return np.zeros(shape=self.n_features_, dtype=np.float64)
relevant_feature_importances = [
tree.tree_.compute_feature_importances(normalize=False)
for tree in relevant_trees
]
avg_feature_importances = np.mean(relevant_feature_importances,
axis=0, dtype=np.float64)
return avg_feature_importances / np.sum(avg_feature_importances)
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features,)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape \
(n_trees_per_iteration, n_samples)
The value of the partial dependence function on each grid point.
"""
if self.init is not None:
warnings.warn(
'Using recursion method with a non-constant init predictor '
'will lead to incorrect partial dependence values. '
'Got init=%s.' % self.init,
UserWarning
)
grid = np.asarray(grid, dtype=DTYPE, order='C')
n_estimators, n_trees_per_stage = self.estimators_.shape
averaged_predictions = np.zeros((n_trees_per_stage, grid.shape[0]),
dtype=np.float64, order='C')
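        # sum each tree's partial dependence over all stages, then apply the shrinkage factor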
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = self.estimators_[stage, k].tree_
tree.compute_partial_dependence(grid, target_features,
averaged_predictions[k])
averaged_predictions *= self.learning_rate
return averaged_predictions
def _validate_y(self, y, sample_weight):
# 'sample_weight' is not utilised but is used for
# consistency with similar method _validate_y of GBC
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(DOUBLE)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf that x ends up in.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, default='deviance'
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int, default=100
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
subsample : float, default=1.0
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
criterion : {'friedman_mse', 'mse', 'mae'}, default='friedman_mse'
The function to measure the quality of a split. Supported criteria
are 'friedman_mse' for the mean squared error with improvement
score by Friedman, 'mse' for mean squared error, and 'mae' for
the mean absolute error. The default value of 'friedman_mse' is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_depth : int, default=3
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
init : estimator or 'zero', default=None
An estimator object that is used to compute the initial predictions.
``init`` has to provide :meth:`fit` and :meth:`predict_proba`. If
'zero', the initial raw predictions are set to zero. By default, a
``DummyEstimator`` predicting the classes priors is used.
random_state : int or RandomState, default=None
Controls the random seed given to each Tree estimator at each
boosting iteration.
In addition, it controls the random permutation of the features at
each split (see Notes for more details).
        It also controls the random splitting of the training data to obtain a
validation set if `n_iter_no_change` is not None.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If 'auto', then `max_features=sqrt(n_features)`.
- If 'sqrt', then `max_features=sqrt(n_features)`.
- If 'log2', then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
verbose : int, default=0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if ``n_iter_no_change`` is set to an integer.
.. versionadded:: 0.20
n_iter_no_change : int, default=None
``n_iter_no_change`` is used to decide if early stopping will be used
to terminate training when validation score is not improving. By
default it is set to None to disable early stopping. If set to a
number, it will set aside ``validation_fraction`` size of the training
data as validation and terminate training when validation score is not
        improving in all of the previous ``n_iter_no_change`` iterations.
        The split is stratified.
.. versionadded:: 0.20
tol : float, default=1e-4
Tolerance for the early stopping. When the loss is not improving
by at least tol for ``n_iter_no_change`` iterations (if set to a
number), the training stops.
.. versionadded:: 0.20
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
n_estimators_ : int
The number of estimators as selected by early stopping (if
``n_iter_no_change`` is specified). Otherwise it is set to
``n_estimators``.
.. versionadded:: 0.20
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
oob_improvement_ : ndarray of shape (n_estimators,)
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
Only available if ``subsample < 1.0``
train_score_ : ndarray of shape (n_estimators,)
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init_ : estimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor of \
shape (n_estimators, ``loss_.K``)
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
classes_ : ndarray of shape (n_classes,)
        The class labels.
n_features_ : int
The number of data features.
n_classes_ : int
The number of classes.
max_features_ : int
The inferred value of max_features.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = GradientBoostingClassifier(random_state=0)
>>> clf.fit(X_train, y_train)
GradientBoostingClassifier(random_state=0)
>>> clf.predict(X_test[:2])
array([1, 0])
>>> clf.score(X_test, y_test)
0.88
See also
--------
sklearn.ensemble.HistGradientBoostingClassifier,
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
@_deprecate_positional_args
def __init__(self, *, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_decrease=0.,
min_impurity_split=None, init=None,
random_state=None, max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
validation_fraction=0.1, n_iter_no_change=None, tol=1e-4,
ccp_alpha=0.0):
super().__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
warm_start=warm_start, validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, tol=tol, ccp_alpha=ccp_alpha)
def _validate_y(self, y, sample_weight):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_trim_classes = np.count_nonzero(np.bincount(y, sample_weight))
if n_trim_classes < 2:
raise ValueError("y contains %d class after sample_weight "
"trimmed classes with zero weights, while a "
"minimum of 2 classes are required."
% n_trim_classes)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
            the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape [n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
raw_predictions = self._raw_predict(X)
if raw_predictions.shape[1] == 1:
return raw_predictions.ravel()
return raw_predictions
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : generator of ndarray of shape (n_samples, k)
The decision function of the input samples, which corresponds to
            the raw values predicted from the trees of the ensemble. The
classes corresponds to that in the attribute :term:`classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
yield from self._staged_raw_predict(X)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
raw_predictions = self.decision_function(X)
encoded_labels = \
self.loss_._raw_prediction_to_decision(raw_predictions)
return self.classes_.take(encoded_labels, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
for raw_predictions in self._staged_raw_predict(X):
encoded_labels = \
self.loss_._raw_prediction_to_decision(raw_predictions)
yield self.classes_.take(encoded_labels, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
raw_predictions = self.decision_function(X)
try:
return self.loss_._raw_prediction_to_proba(raw_predictions)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
try:
for raw_predictions in self._staged_raw_predict(X):
yield self.loss_._raw_prediction_to_proba(raw_predictions)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, default='ls'
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int, default=100
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
subsample : float, default=1.0
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
criterion : {'friedman_mse', 'mse', 'mae'}, default='friedman_mse'
The function to measure the quality of a split. Supported criteria
are "friedman_mse" for the mean squared error with improvement
score by Friedman, "mse" for mean squared error, and "mae" for
the mean absolute error. The default value of "friedman_mse" is
generally the best as it can provide a better approximation in
some cases.
.. versionadded:: 0.18
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_depth : int, default=3
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
min_impurity_split : float, default=None
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19. The default value of
``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it
will be removed in 0.25. Use ``min_impurity_decrease`` instead.
init : estimator or 'zero', default=None
An estimator object that is used to compute the initial predictions.
``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the
initial raw predictions are set to zero. By default a
``DummyEstimator`` is used, predicting either the average target value
(for loss='ls'), or a quantile for the other losses.
random_state : int or RandomState, default=None
Controls the random seed given to each Tree estimator at each
boosting iteration.
In addition, it controls the random permutation of the features at
each split (see Notes for more details).
        It also controls the random splitting of the training data to obtain a
validation set if `n_iter_no_change` is not None.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
alpha : float, default=0.9
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
verbose : int, default=0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if ``n_iter_no_change`` is set to an integer.
.. versionadded:: 0.20
n_iter_no_change : int, default=None
``n_iter_no_change`` is used to decide if early stopping will be used
to terminate training when validation score is not improving. By
default it is set to None to disable early stopping. If set to a
number, it will set aside ``validation_fraction`` size of the training
data as validation and terminate training when validation score is not
        improving in all of the previous ``n_iter_no_change`` iterations.
.. versionadded:: 0.20
tol : float, default=1e-4
Tolerance for the early stopping. When the loss is not improving
by at least tol for ``n_iter_no_change`` iterations (if set to a
number), the training stops.
.. versionadded:: 0.20
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
Attributes
----------
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
oob_improvement_ : ndarray of shape (n_estimators,)
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
Only available if ``subsample < 1.0``
train_score_ : ndarray of shape (n_estimators,)
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init_ : estimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1)
The collection of fitted sub-estimators.
n_classes_ : int
The number of classes, set to 1 in regression tasks.
n_estimators_ : int
The number of estimators as selected by early stopping (if
``n_iter_no_change`` is specified). Otherwise it is set to
``n_estimators``.
n_features_ : int
The number of data features.
max_features_ : int
The inferred value of max_features.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_regression(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> reg = GradientBoostingRegressor(random_state=0)
>>> reg.fit(X_train, y_train)
GradientBoostingRegressor(random_state=0)
>>> reg.predict(X_test[1:2])
array([-61...])
>>> reg.score(X_test, y_test)
0.4...
See also
--------
sklearn.ensemble.HistGradientBoostingRegressor,
sklearn.tree.DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
@_deprecate_positional_args
def __init__(self, *, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, criterion='friedman_mse', min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, min_impurity_decrease=0.,
min_impurity_split=None, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, validation_fraction=0.1,
n_iter_no_change=None, tol=1e-4, ccp_alpha=0.0):
super().__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
criterion=criterion, min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, tol=tol, ccp_alpha=ccp_alpha)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr')
# In regression we can directly return the raw value from the trees.
return self._raw_predict(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
for raw_predictions in self._staged_raw_predict(X):
yield raw_predictions.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf that x ends up in.
"""
leaves = super().apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/preprocessing/plot_resample.py | 12 | 3364 | """
===============
Resampling data
===============
When performing experiments where timing is critical, a signal with a high
sampling rate is desired. However, having a signal with a much higher sampling
rate than is necessary needlessly consumes memory and slows down computations
operating on the data.
This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold
reduction in data size, at the cost of an equal loss of temporal resolution.
"""
# Authors: Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
#
from __future__ import print_function
from matplotlib import pyplot as plt
import mne
from mne.io import Raw
from mne.datasets import sample
###############################################################################
# Setting up data paths and loading raw data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = Raw(raw_fname, preload=True)
###############################################################################
# Since downsampling reduces the timing precision of events, we recommend
# first extracting epochs and downsampling the Epochs object:
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)
# Downsample to 100 Hz
print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
epochs_resampled = epochs.resample(100, copy=True)
print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')
# Plot a piece of data to see the effects of downsampling
plt.figure(figsize=(7, 3))
n_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data
plt.plot(epochs.times[:n_samples_to_plot],
epochs.get_data()[0, 0, :n_samples_to_plot], color='black')
n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
plt.plot(epochs_resampled.times[:n_samples_to_plot],
epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
'-o', color='red')
plt.xlabel('time (s)')
plt.legend(['original', 'downsampled'], loc='best')
plt.title('Effect of downsampling')
mne.viz.tight_layout()
###############################################################################
# When resampling epochs is unwanted or impossible, for example when the data
# doesn't fit into memory or your analysis pipeline doesn't involve epochs at
# all, the alternative approach is to resample the continuous data. This
# can also be done on non-preloaded data.
# Resample to 300 Hz
raw_resampled = raw.resample(300, copy=True)
###############################################################################
# Because resampling also affects the stim channels, some trigger onsets might
# be lost in this case. While MNE attempts to downsample the stim channels in
# an intelligent manner to avoid this, the recommended approach is to find
# events on the original data before downsampling.
print('Number of events before resampling:', len(mne.find_events(raw)))
# Resample to 100 Hz (generates warning)
raw_resampled = raw.resample(100, copy=True)
print('Number of events after resampling:',
len(mne.find_events(raw_resampled)))
# To avoid losing events, jointly resample the data and event matrix
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.resample(100, events=events, copy=True)
print('Number of events after resampling:', len(events_resampled))
| bsd-3-clause |
tectronics/windenergytk | examples/euler_method_demo.py | 3 | 4621 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# euler_method_demo.py #
# #
# Part of UMass Amherst's Wind Energy Engineering Toolbox of Mini-Codes #
# (or Mini-Codes for short) #
# #
# Python code by Alec Koumjian - [email protected] #
# #
# This code adapted from the original Visual Basic code at #
# http://www.ceere.org/rerl/projects/software/mini-code-overview.html #
# #
# These tools can be used in conjunction with the textbook #
# "Wind Energy Explained" by J.F. Manwell, J.G. McGowan and A.L. Rogers #
# http://www.ceere.org/rerl/rerl_windenergytext.html #
# #
################################################################################
# Copyright 2009 Alec Koumjian #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import matplotlib.pyplot as plt
import numpy as np
# We want to find solutions to the equation:
# cosh(x)cos(x) + 1 = 0 for x
# Plotting the solutions
# To get a better understanding of the solutions we're looking for,
# we first rearrange the equation:
# cos(x) = -1/cosh(x)
# If we plot each side of the equation separately, the points where the two
# functions intersect are solutions for the equation.
def show_graph():
x = np.arange(-10, 10, 0.2)
y = np.cos(x)
y_2 = -1./np.cosh(x)
fig = plt.figure()
plt.grid()
ax = fig.add_subplot(111)
ax.plot(x, y,'-', x, y_2,'-')
plt.axis([-10,10,-2,2])
plt.show()
def epsilon(x):
return np.cosh(x) * np.cos(x) + 1
def solve(number_of_solutions=4, y=0.0, step=1, target_epsilon=0.00000000001):
"""Iteratively solve cosh(y)cos(y) + 1 = 0 for y using the Euler method"""
solutions = []
# Look for solutions until we have enough for our purposes.
while len(solutions) < number_of_solutions:
y_1 = y
y = y + step
epsilon_y = epsilon(y)
epsilon_y_1 = epsilon(y_1)
# Only when y and y_1 surround a solution do we hone in on the solution
if np.sign(epsilon_y) != np.sign(epsilon_y_1):
# Iterate closer to solution until either y or y_1 are within range
while (abs(epsilon_y) > target_epsilon) and (abs(epsilon_y_1) > target_epsilon):
n = (y_1 + y)/2
epsilon_n = epsilon(n)
epsilon_y = epsilon(y)
epsilon_y_1 = epsilon(y_1)
if np.sign(epsilon(n)) != np.sign(epsilon(y)):
y_1 = n
else:
y = n
# Add whichever marker is closer to solution
if abs(epsilon_y) < abs(epsilon_y_1):
solutions.append(y)
else:
solutions.append(y_1)
return solutions
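# A minimal cross-check of solve(), not part of the original mini-code: it
# brackets the same sign changes of epsilon() but lets SciPy's brentq root
# finder do the refinement. SciPy is an extra dependency assumed here; the
# original script only needs numpy and matplotlib.
def solve_with_scipy(number_of_solutions=4, step=1):
    """Find the same roots of cosh(y)cos(y) + 1 = 0 with scipy.optimize.brentq."""
    from scipy.optimize import brentq
    roots = []
    y = 0.0
    while len(roots) < number_of_solutions:
        y_1, y = y, y + step
        # brentq needs an interval [y_1, y] on which epsilon changes sign
        if np.sign(epsilon(y_1)) != np.sign(epsilon(y)):
            roots.append(brentq(epsilon, y_1, y))
    return roots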
if __name__ == "__main__":
print "The first solutions for x are:"
print solve()
show_graph() | gpl-3.0 |
xysmas/microsoft_malware_challenge | src/models/svm_bytecode/svm_bytecode.py | 2 | 6432 | """
Preliminary code for submissions on the
Microsoft Malware Classification challenge.
"""
__authors__ = 'Aaron Gonzales, Andres Ruiz'
__licence__ = 'Apache'
__email__ = '[email protected]'
import sys, os, argparse
import numpy as np
from sklearn import linear_model
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from microsoft_malware_challenge.src.utils import utils
import joblib
class Executor(object):
"""
Executes the selected classification pipeline. Right now the
    customization process is by hand, i.e. you have to code it. The
idea is to have a couple of pipelines prepared
"""
def __init__(self):
"""
Creates a new executor object and initializes the main
components.
"""
self.target_names = ['Ramnit', 'Lollipop', 'Kelihos_ver3',
'Vundo', 'Simda', 'Tracur', 'Kelihos_ver1',
'Obfuscator.ACY', 'Gatak']
self.db = utils.get_mongodb()
self.train = None
self.test = None
self.param_tunning = None
self.fitted_model = None
def _load_train(self):
"""
Loads the training dataset.
__THIS__ is the method you want to modify when querying
the database.
TODO: The data part can be just one function
"""
data_train = [(x['hexcode']['bigrams'], x['class'])
for x in self.db.samples.find({
"class": {"$exists": True}})]
return list(zip(*data_train))
def _load_test(self):
"""
Loads the testing dataset.
__THIS__ is the method you want to modify when querying
the database.
"""
data_test = [(x['hexcode']['bigrams'], '"{}"'.format(x['id']))
for x in self.db.test_samples.find({
"id":{"$exists": True}})]
return list(zip(*data_test))
def load_data(self, training=True, testing=False):
"""
Fetches the training data from the database. `training` and
testing indicate the datasets that should be loaded.
Arguments:
`training`: If False, the training dataset is NOT loaded.
        `testing`: If True, the testing dataset IS loaded
"""
if training:
temp = self._load_train()
self.train = {'data': (temp[0]), 'target': temp[1]}
if testing:
temp = self._load_test()
self.test = {'data': (temp[0]), 'names': temp[1]}
def config_model(self):
"""
Configures the pipeline
"""
pip = Pipeline([
('vectorizer', DictVectorizer()),
('freq_norm', TfidfTransformer()),
('classifier', linear_model.SGDClassifier(
loss='modified_huber',
penalty='elasticnet',
alpha=1e-2,
n_jobs=-1))
])
parameters = {}
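        # A hypothetical, non-empty grid for reference (the original leaves
        # `parameters` empty). GridSearchCV expects keys prefixed with the
        # pipeline step name and a double underscore, e.g.:
        #     parameters = {'classifier__alpha': (1e-2, 1e-3, 1e-4),
        #                   'freq_norm__use_idf': (True, False)}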
self.param_tunning = GridSearchCV(pip, parameters, n_jobs=-1)
def fit(self):
"""
Fits the parameters to the pipeline
"""
self.fitted_model = self.param_tunning.fit(self.train['data'],
self.train['target'])
def _predict(self, X, create_submission=False, filename='submission.txt'):
"""
        Predicts a set of 9 probabilities per malware sample, which
correspond to the 9 malware classes. If `create_submission`
is True, then a text file named `filename` is created for
submission into Kaggle.
Arguments:
`X`: The data in which predictions will be made.
`create_submission`: Indicates whether a submission file should
be created or not.
`filename`: The file that will contain the submission.
"""
predicted_prob = self.fitted_model.predict_proba(X)
if create_submission:
to_print = np.column_stack((np.array(self.test['names']),
predicted_prob))
np.savetxt(filename, to_print, header=','.join(['"id"'] + \
['"Prediction%d"' % x for x in range(1, 10)]), \
fmt='%s', delimiter=',')
return predicted_prob
def predict_on_test(self, create_submission=False,
filename='submission.txt'):
"""
        Performs prediction on the test dataset. See `_predict` for
        the keyword arguments that can be used.
Arguments:
`**kwargs`: see `_predict`.
"""
        if self.test is None:
sys.stderr.write("Test set not loaded. Aborting prediction\n")
return
return self._predict(self.test['data'], create_submission, filename)
def load_model(self, filename='model.pkl'):
"""
Attempts to load the already computed model from
the `filename` file. If it is not found, then raises
        an exception.
        Arguments:
`filename`: The name of the file that contains the model
"""
self.fitted_model = joblib.load(filename)
def config_parser():
"""
Configures the parser for the command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('--save_model', default='model',
help='specifies the directory \
where the model will be saved')
return parser
def main():
"""
Runs the main program
"""
args = config_parser().parse_args()
executor = Executor()
print("Loading data...")
executor.load_data(testing=True)
print('Configuring the model...')
executor.config_model()
print('Fitting the model...')
executor.fit()
if args.save_model:
if not os.path.isdir(args.save_model):
os.mkdir(args.save_model)
save_path = os.path.join(args.save_model, 'model.pkl')
joblib.dump(executor.fitted_model, save_path)
print('Model saved on %s.' % save_path)
print('Predicting...')
executor.predict_on_test(create_submission=True)
if __name__ == '__main__':
main()
| apache-2.0 |
CIFASIS/pylearn2 | pylearn2/train_extensions/live_monitoring.py | 30 | 11536 | """
Training extension for allowing querying of monitoring values while an
experiment executes.
"""
__authors__ = "Dustin Webb"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Dustin Webb"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import copy
try:
import zmq
zmq_available = True
except:
zmq_available = False
try:
import matplotlib.pyplot as plt
pyplot_available = True
except:
pyplot_available = False
from functools import wraps
from pylearn2.monitor import Monitor
from pylearn2.train_extensions import TrainExtension
class LiveMonitorMsg(object):
"""
Base class that defines the required interface for all Live Monitor
messages.
"""
response_set = False
def get_response(self):
"""
Method that instantiates a response message for a given request
message. It is not necessary to implement this function on response
messages.
"""
raise NotImplementedError('get_response is not implemented.')
class ChannelListResponse(LiveMonitorMsg):
"""
A message containing the list of channels being monitored.
"""
pass
class ChannelListRequest(LiveMonitorMsg):
"""
A message indicating a request for a list of channels being monitored.
"""
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelListResponse()
class ChannelsResponse(LiveMonitorMsg):
"""
A message containing monitoring data related to the channels specified.
Data can be requested for all epochs or select epochs.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start, end, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
class ChannelsRequest(LiveMonitorMsg):
"""
A message for requesting data related to the channels specified.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
start : int
The starting epoch for which data should be returned.
end : int
The epoch after which data should be returned.
step : int
The number of epochs to be skipped between data points.
"""
def __init__(self, channel_list, start=0, end=-1, step=1):
assert(
isinstance(channel_list, list)
and len(channel_list) > 0
)
self.channel_list = channel_list
assert(start >= 0)
self.start = start
self.end = end
assert(step > 0)
self.step = step
@wraps(LiveMonitorMsg.get_response)
def get_response(self):
return ChannelsResponse(
self.channel_list,
self.start,
self.end,
self.step
)
class LiveMonitoring(TrainExtension):
"""
A training extension for remotely monitoring and filtering the channels
being monitored in real time. PyZMQ must be installed for this extension
to work.
Parameters
----------
address : string
The IP addresses of the interfaces on which the monitor should listen.
req_port : int
The port number to be used to service request.
pub_port : int
The port number to be used to publish updates.
"""
def __init__(self, address='*', req_port=5555, pub_port=5556):
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port != pub_port)
assert(req_port > 1024 and req_port < 65536)
self.req_port = req_port
assert(pub_port > 1024 and pub_port < 65536)
self.pub_port = pub_port
address_template = self.address + ':%d'
self.context = zmq.Context()
self.req_sock = None
if self.req_port > 0:
self.req_sock = self.context.socket(zmq.REP)
self.req_sock.bind(address_template % self.req_port)
self.pub_sock = None
if self.pub_port > 0:
self.pub_sock = self.context.socket(zmq.PUB)
            self.pub_sock.bind(address_template % self.pub_port)
# Tracks the number of times on_monitor has been called
self.counter = 0
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
monitor = Monitor.get_monitor(model)
try:
rsqt_msg = self.req_sock.recv_pyobj(flags=zmq.NOBLOCK)
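            # Non-blocking receive: when no client request is pending, pyzmq
            # raises zmq.Again, which is caught below so training continues.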
# Determine what type of message was received
rsp_msg = rsqt_msg.get_response()
if isinstance(rsp_msg, ChannelListResponse):
rsp_msg.data = list(monitor.channels.keys())
if isinstance(rsp_msg, ChannelsResponse):
channel_list = rsp_msg.channel_list
if (
not isinstance(channel_list, list)
or len(channel_list) == 0
):
channel_list = []
result = TypeError(
'ChannelResponse requires a list of channels.'
)
                else:
                    result = {}
for channel_name in channel_list:
if channel_name in monitor.channels.keys():
chan = copy.deepcopy(
monitor.channels[channel_name]
)
end = rsp_msg.end
if end == -1:
end = len(chan.batch_record)
# TODO copying and truncating the records individually
# like this is brittle. Is there a more robust
# solution?
chan.batch_record = chan.batch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.epoch_record = chan.epoch_record[
rsp_msg.start:end:rsp_msg.step
]
chan.example_record = chan.example_record[
rsp_msg.start:end:rsp_msg.step
]
chan.time_record = chan.time_record[
rsp_msg.start:end:rsp_msg.step
]
chan.val_record = chan.val_record[
rsp_msg.start:end:rsp_msg.step
]
result[channel_name] = chan
else:
result[channel_name] = KeyError(
                            'Invalid channel: %s' % channel_name
)
rsp_msg.data = result
self.req_sock.send_pyobj(rsp_msg)
except zmq.Again:
pass
self.counter += 1
class LiveMonitor(object):
"""
    A utility class for requesting data from a LiveMonitoring training
extension.
Parameters
----------
address : string
The IP address on which a LiveMonitoring process is listening.
req_port : int
The port number on which a LiveMonitoring process is listening.
"""
def __init__(self, address='127.0.0.1', req_port=5555):
"""
"""
if not zmq_available:
raise ImportError('zeromq needs to be installed to '
'use this module.')
self.address = 'tcp://%s' % address
assert(req_port > 0)
self.req_port = req_port
self.context = zmq.Context()
self.req_sock = self.context.socket(zmq.REQ)
self.req_sock.connect(self.address + ':' + str(self.req_port))
self.channels = {}
def list_channels(self):
"""
Returns a list of the channels being monitored.
"""
self.req_sock.send_pyobj(ChannelListRequest())
return self.req_sock.recv_pyobj()
def update_channels(self, channel_list, start=-1, end=-1, step=1):
"""
        Retrieves data for a specified set of channels and combines that data
        with any previously retrieved data.
        This assumes all the channels have the same number of values. It is
        unclear as to whether this is a reasonable assumption. If they do not
        have the same number of values then it may request too much or too
        little data, leading to duplicated data or holes in the data,
        respectively. This could be made more robust by making a call to
retrieve all the data for all of the channels.
Parameters
----------
channel_list : list
A list of the channels for which data should be requested.
start : int
            The starting epoch for which data should be requested.
        end : int
            The last epoch for which data should be requested; -1 requests
            all available epochs.
        step : int
The number of epochs to be skipped between data points.
"""
assert((start == -1 and end == -1) or end > start)
if start == -1:
start = 0
if len(self.channels.keys()) > 0:
channel_name = list(self.channels.keys())[0]
start = len(self.channels[channel_name].epoch_record)
self.req_sock.send_pyobj(ChannelsRequest(
channel_list, start=start, end=end, step=step
))
rsp_msg = self.req_sock.recv_pyobj()
if isinstance(rsp_msg.data, Exception):
raise rsp_msg.data
for channel in rsp_msg.data.keys():
rsp_chan = rsp_msg.data[channel]
if isinstance(rsp_chan, Exception):
raise rsp_chan
if channel not in self.channels.keys():
self.channels[channel] = rsp_chan
else:
chan = self.channels[channel]
chan.batch_record += rsp_chan.batch_record
chan.epoch_record += rsp_chan.epoch_record
chan.example_record += rsp_chan.example_record
chan.time_record += rsp_chan.time_record
chan.val_record += rsp_chan.val_record
def follow_channels(self, channel_list):
"""
Tracks and plots a specified set of channels in real time.
Parameters
----------
channel_list : list
A list of the channels for which data has been requested.
"""
if not pyplot_available:
raise ImportError('pyplot needs to be installed for '
'this functionality.')
plt.clf()
plt.ion()
while True:
self.update_channels(channel_list)
plt.clf()
for channel_name in self.channels:
plt.plot(
self.channels[channel_name].epoch_record,
self.channels[channel_name].val_record,
label=channel_name
)
plt.legend()
plt.ion()
plt.draw()
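# A client-side usage sketch, not part of the original module. The channel
# name and port below are assumptions (they simply match the defaults above),
# not values mandated by pylearn2.
def example_client(channel='valid_y_misclass', address='127.0.0.1', port=5555):
    """Poll a running LiveMonitoring extension and plot one channel live."""
    monitor = LiveMonitor(address=address, req_port=port)
    # ChannelListResponse.data holds the names of the monitored channels
    print(monitor.list_channels().data)
    monitor.follow_channels([channel])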
| bsd-3-clause |
themrmax/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
rew4332/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 8987 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) #in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
| apache-2.0 |
pkainz/pylearn2 | pylearn2/models/independent_multiclass_logistic.py | 44 | 2491 | """
Multiclass-classification by taking the max over a set of one-against-rest
logistic classifiers.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import logging
try:
from sklearn.linear_model import LogisticRegression
except ImportError:
LogisticRegression = None
import numpy as np
from theano.compat.six.moves import xrange
logger = logging.getLogger(__name__)
class IndependentMulticlassLogistic:
"""
Fits a separate logistic regression classifier for each class, makes
predictions based on the max output: during training, views a one-hot label
vector as a vector of independent binary labels, rather than correctly
modeling them as one-hot like softmax would do.
This is what Jia+Huang used to get state of the art on CIFAR-100
Parameters
----------
C : WRITEME
"""
def __init__(self, C):
self.C = C
def fit(self, X, y):
"""
Fits the model to the given training data.
Parameters
----------
X : ndarray
2D array, each row is one example
y : ndarray
vector of integer class labels
"""
if LogisticRegression is None:
raise RuntimeError("sklearn not available.")
min_y = y.min()
max_y = y.max()
assert min_y == 0
num_classes = max_y + 1
assert num_classes > 1
logistics = []
for c in xrange(num_classes):
logger.info('fitting class {0}'.format(c))
cur_y = (y == c).astype('int32')
logistics.append(LogisticRegression(C = self.C).fit(X,cur_y))
return Classifier(logistics)
class Classifier:
"""
.. todo::
WRITEME
Parameters
----------
logistics : WRITEME
"""
def __init__(self, logistics):
assert len(logistics) > 1
num_classes = len(logistics)
num_features = logistics[0].coef_.shape[1]
self.W = np.zeros((num_features, num_classes))
self.b = np.zeros((num_classes,))
for i in xrange(num_classes):
self.W[:,i] = logistics[i].coef_
self.b[i] = logistics[i].intercept_
def predict(self, X):
"""
.. todo::
WRITEME
"""
return np.argmax(self.b + np.dot(X,self.W), 1)
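# A minimal usage sketch, not part of the original module (X is any 2-D
# feature array, y integer class labels starting at 0):
#
#     model = IndependentMulticlassLogistic(C=1.0).fit(X, y)
#     predictions = model.predict(X)   # hard labels via the per-class argmax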
| bsd-3-clause |
tehtechguy/mHTM | src/examples/mnist_simple.py | 1 | 3519 | # mnist_simple.py
#
# Author : James Mnatzaganian
# Contact : http://techtorials.me
# Organization : NanoComputing Research Lab - Rochester Institute of
# Technology
# Website : https://www.rit.edu/kgcoe/nanolab/
# Date Created : 12/13/15
#
# Description : Testing SP with MNIST using a simple demonstration.
# Python Version : 2.7.X
#
# License : MIT License http://opensource.org/licenses/mit-license.php
# Copyright : (c) 2016 James Mnatzaganian
"""
Testing SP with MNIST using a simple demonstration.
G{packagetree mHTM}
"""
__docformat__ = 'epytext'
# Native imports
import os
# Third party imports
import numpy as np
from sklearn.svm import LinearSVC
# Program imports
from mHTM.datasets.loader import load_mnist, MNISTCV
from mHTM.region import SPRegion
from mHTM.plot import plot_compare_images
def main(ntrain=800, ntest=200, nsplits=1, seed=123456789):
# Set the configuration parameters for the SP
ninputs = 784
kargs = {
'ninputs': ninputs,
'ncolumns': ninputs,
'nactive': 30,
'global_inhibition': True,
'trim': False,
'seed': seed,
'disable_boost': True,
'nsynapses': 392,
'seg_th': 10,
'syn_th': 0.5,
'pinc': 0.001,
'pdec': 0.002,
'pwindow': 0.01,
'random_permanence': True,
'nepochs': 10,
'clf': LinearSVC(random_state=seed),
'log_dir': os.path.join('simple_mnist', '1-1')
}
# Seed numpy
np.random.seed(seed)
# Get the data
(tr_x, tr_y), (te_x, te_y) = load_mnist()
x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y))
# Split the data for CV
cv = MNISTCV(tr_y, te_y, ntrain, ntest, nsplits, seed)
# Execute the SP on each fold. Additionally, get results for each fitting
# method.
for i, (tr, te) in enumerate(cv):
# Create the region
sp = SPRegion(**kargs)
# Train the region
sp.fit(x[tr], y[tr])
# Test the base classifier
clf = LinearSVC(random_state=seed)
clf.fit(x[tr], y[tr])
score = clf.score(x[te], y[te])
print 'SVM Only Accuracy: {0:.2f}%'.format(score * 100)
# Test the region for the column method
score = sp.score(x[te], y[te])
print 'Column Accuracy: {0:.2f}%'.format(score * 100)
# Test the region for the probabilistic method
score = sp.score(x[te], y[te], tr_x=x[tr], score_method='prob')
print 'Probabilistic Accuracy: {0:.2f}%'.format(score * 100)
# Test the region for the dimensionality reduction method
score = sp.score(x[te], y[te], tr_x=x[tr], score_method='reduction')
ndims = len(sp.reduce_dimensions(x[0]))
print 'Input Reduced from {0} to {1}: {2:.1f}X reduction'.format(
ninputs, ndims, ninputs / float(ndims))
print 'Reduction Accuracy: {0:.2f}%'.format(score * 100)
# Get a random set of unique inputs from the training set
inputs = np.zeros((10, ninputs))
for i in xrange(10):
ix = np.random.permutation(np.where(y[tr] == i)[0])[0]
inputs[i] = x[tr][ix]
# Get the SP's predictions for the inputs
sp_pred = sp.predict(inputs)
# Get the reconstruction in the context of the SP
sp_inputs = sp.reconstruct_input(sp_pred)
# Make a plot comparing the images
title = 'Input Reconstruction: Original (top), SP SDRs (middle), ' \
'SP Reconstruction (bottom)'
shape = (28, 28)
path = os.path.join(sp.log_dir, 'input_reconstruction.png')
plot_compare_images((inputs, sp_pred, sp_inputs), shape, title,
out_path=path)
if __name__ == '__main__':
main() | mit |
elkingtonmcb/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
nonsk131/USRP2016 | analysis.py | 1 | 1565 | import pandas as pd
import os
filePath = '/tigress/np5/'
binary = ['evidence_bound.txt','evidence_unassociated.txt']
triplet = ['evidence_triplet0.txt','evidence_triplet1.txt',
'evidence_triplet2.txt','evidence_triplet3.txt',
'evidence_triplet4.txt']
quad = ['evidence_quad0.txt','evidence_quad1.txt','evidence_quad2.txt',
'evidence_quad3.txt','evidence_quad4.txt','evidence_quad5.txt',
'evidence_quad6.txt','evidence_quad7.txt','evidence_quad8.txt',
'evidence_quad9.txt','evidence_quad10.txt','evidence_quad11.txt',
'evidence_quad12.txt','evidence_quad13.txt','evidence_quad14.txt']
data = [binary] #, triplet, quad]
def get_columnName(name):
if 'triple' in name:
return name[:17]
elif 'quad' in name:
return name[:-4]
else:
name = name.split('.')
return name[0]
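# Illustrative mapping (derived from the logic above): 'evidence_bound.txt'
# -> 'evidence_bound', 'evidence_triplet0.txt' -> 'evidence_triplet0' (first
# 17 characters), 'evidence_quad10.txt' -> 'evidence_quad10' ('.txt' stripped).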
df_final = pd.DataFrame()
for element in data:
df = pd.DataFrame()
for name in element:
indexList = []
valueList = []
c = get_columnName(name)
fileName = os.path.join(filePath, name)
file = open(fileName, 'r')
for line in file:
line = line.split(':')
indexList.append(line[0])
line = line[1].split(',')
valueList.append(float(line[0][2:]))
file.close()
series = pd.Series(valueList, index=indexList, name=c)
df = pd.concat([df,series], axis=1)
df_final = df_final.append(df)
df_final = df_final.sort_index()
df_final.to_csv(path_or_buf='/tigress/np5/all_df.csv')
| mit |
fbagirov/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
keras-team/autokeras | docs/py/structured_data_classification.py | 1 | 7356 | """shell
pip install autokeras
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import autokeras as ak
"""
## A Simple Example
The first step is to prepare your data. Here we use the [Titanic
dataset](https://www.kaggle.com/c/titanic) as an example.
"""
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
"""
The second step is to run the
[StructuredDataClassifier](/structured_data_classifier).
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.
"""
# Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
overwrite=True, max_trials=3
) # It tries 3 different models.
# Feed the structured data classifier with training data.
clf.fit(
# The path to the train.csv file.
train_file_path,
# The name of the label column.
"survived",
epochs=10,
)
# Predict with the best model.
predicted_y = clf.predict(test_file_path)
# Evaluate the best model with testing data.
print(clf.evaluate(test_file_path, "survived"))
"""
## Data Format
The AutoKeras StructuredDataClassifier is quite flexible for the data format.
The example above shows how to use the CSV files directly. Besides CSV files,
it also supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The
data should be two-dimensional with numerical or categorical values.
For the classification labels,
AutoKeras accepts both plain labels, i.e. strings or integers, and one-hot
encoded labels, i.e. vectors of 0s and 1s.
The labels can be numpy.ndarray, pandas.DataFrame, or pandas.Series.
The following examples show how the data can be prepared with numpy.ndarray,
pandas.DataFrame, and tensorflow.data.Dataset.
"""
# x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train)) # pandas.DataFrame
y_train = x_train.pop("survived")
print(type(y_train)) # pandas.Series
# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train)) # pandas.DataFrame
# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train)) # numpy.ndarray
print(type(y_train)) # numpy.ndarray
# Preparing testing data.
x_test = pd.read_csv(test_file_path)
y_test = x_test.pop("survived")
# It tries 10 different models.
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=3)
# Feed the structured data classifier with training data.
clf.fit(x_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(x_test)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))
"""
The following code shows how to convert numpy.ndarray to tf.data.Dataset.
"""
train_set = tf.data.Dataset.from_tensor_slices((x_train.astype(np.unicode), y_train))
test_set = tf.data.Dataset.from_tensor_slices(
(x_test.to_numpy().astype(np.unicode), y_test)
)
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=3)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))
"""
You can also specify the column names and types for the data as follows. The
`column_names` is optional if the training data already have the column names,
e.g. pandas.DataFrame, CSV file. Any column whose type is not specified will
be inferred from the training data.
"""
# Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
column_names=[
"sex",
"age",
"n_siblings_spouses",
"parch",
"fare",
"class",
"deck",
"embark_town",
"alone",
],
column_types={"sex": "categorical", "fare": "numerical"},
max_trials=10, # It tries 10 different models.
overwrite=True,
)
"""
## Validation Data
By default, AutoKeras uses the last 20% of training data as validation data. As
shown in the example below, you can use `validation_split` to specify the
percentage.
"""
clf.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=10,
)
"""
You can also use your own validation set
instead of splitting it from the training data with `validation_data`.
"""
split = 500
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(
x_train,
y_train,
# Use your own validation set.
validation_data=(x_val, y_val),
epochs=10,
)
"""
## Customized Search Space
For advanced users, you may customize your search space by using
[AutoModel](/auto_model/#automodel-class) instead of
[StructuredDataClassifier](/structured_data_classifier). You can configure the
[StructuredDataBlock](/block/#structureddatablock-class) for some high-level
configurations, e.g., `categorical_encoding` for whether to use the
[CategoricalToNumerical](/block/#categoricaltonumerical-class). You can also
leave these arguments unspecified, in which case the different choices will be
tuned automatically. See the following example for details.
"""
input_node = ak.StructuredDataInput()
output_node = ak.StructuredDataBlock(categorical_encoding=True)(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=3
)
clf.fit(x_train, y_train, epochs=10)
"""
The usage of [AutoModel](/auto_model/#automodel-class) is similar to the
[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.
Basically, you are building a graph, whose edges are blocks and the nodes are
intermediate outputs of blocks.
To add an edge from `input_node` to `output_node`, use
`output_node = ak.[some_block]([block_args])(input_node)`.
You can also use more fine-grained blocks to customize the search space even
further. See the following example.
"""
input_node = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node)
output_node = ak.DenseBlock()(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=1)
clf.predict(x_train)
"""
You can also export the best model found by AutoKeras as a Keras Model.
"""
model = clf.export_model()
model.summary()
print(x_train.dtype)
# numpy array in object (mixed type) is not supported.
# convert it to unicode.
model.predict(x_train.astype(np.unicode))
"""
## Reference
[StructuredDataClassifier](/structured_data_classifier),
[AutoModel](/auto_model/#automodel-class),
[StructuredDataBlock](/block/#structureddatablock-class),
[DenseBlock](/block/#denseblock-class),
[StructuredDataInput](/node/#structureddatainput-class),
[ClassificationHead](/block/#classificationhead-class),
[CategoricalToNumerical](/block/#categoricaltonumerical-class).
"""
| apache-2.0 |
Caoimhinmg/PmagPy | programs/pmag_gui.py | 1 | 32221 | #!/usr/bin/env pythonw
# pylint: disable=W0612,C0111,C0103,W0201,E402
print("-I- Importing Pmag GUI dependencies")
#from pmag_env import set_env
#set_env.set_backend(wx=True)
import matplotlib
if not matplotlib.get_backend() == 'WXAgg':
matplotlib.use('WXAgg')
import wx
import wx.lib.buttons as buttons
import wx.lib.newevent as newevent
import os
import sys
from pmagpy import pmag
from pmagpy import ipmag
from pmagpy import builder2 as builder
from pmagpy import new_builder as nb
from dialogs import pmag_basic_dialogs_native3 as pbd3
from dialogs import pmag_basic_dialogs as pbd2
from dialogs import pmag_er_magic_dialogs
from dialogs import pmag_gui_menu3 as pmag_gui_menu
from dialogs import ErMagicBuilder
from dialogs import demag_dialogs
from dialogs import pmag_widgets as pw
global PMAGPY_DIRECTORY
import pmagpy.find_pmag_dir as find_pmag_dir
PMAGPY_DIRECTORY = find_pmag_dir.get_pmag_dir()
from programs import demag_gui
from programs import thellier_gui
#from programs import thellier_gui3
class MagMainFrame(wx.Frame):
""""""
try:
version= pmag.get_version()
except:
version = ""
title = "Pmag GUI version: %s"%version
if sys.platform in ['win32', 'win64']:
title += " Powered by Enthought Canopy"
def __init__(self, WD=None, DM=None, dmodel=None):
"""
Input working directory, data model number (2.5 or 3),
and data model (optional).
"""
wx.Frame.__init__(self, None, wx.ID_ANY, self.title, name='pmag_gui mainframe')
#set icon
self.icon = wx.Icon()
icon_path = os.path.join(PMAGPY_DIRECTORY, 'programs', 'images', 'PmagPy.ico')
if os.path.isfile(icon_path):
self.icon.CopyFromBitmap(wx.Bitmap(icon_path, wx.BITMAP_TYPE_ANY))
self.SetIcon(self.icon)
else:
print("-I- PmagPy icon file not found -- skipping")
# if DM was provided:
if DM:
self.data_model_num = int(float(DM))
# try to get DM from command line args
if not DM:
self.data_model_num = int(float(pmag.get_named_arg_from_sys("-DM", 0)))
DM = self.data_model_num
# if WD was provided:
if WD:
self.WD = WD
else:
WD = pmag.get_named_arg_from_sys("-WD", '')
self.WD = WD
self.data_model = dmodel
self.FIRST_RUN = True
self.panel = wx.Panel(self, name='pmag_gui main panel')
self.InitUI()
if WD and DM:
self.set_dm(self.data_model_num)
if WD:
self.dir_path.SetValue(self.WD)
# for use as module:
self.resource_dir = os.getcwd()
# set some things
self.HtmlIsOpen = False
self.Bind(wx.EVT_CLOSE, self.on_menu_exit)
# if not specified on the command line,
# make the user choose data model num (2 or 3)
# and working directory
wx.CallAfter(self.get_dm_and_wd, DM, WD)
def get_dm_and_wd(self, DM=None, WD=None):
"""
If DM and/or WD are missing, call user-input dialogs
to ascertain that information.
Parameters
----------
self
DM : int
number of data model to use (2 or 3), default None
WD : str
name of working directory, default None
"""
if not DM:
self.get_dm_num()
if not WD:
self.get_DIR()
# no need to get wd_data
return
if self.data_model_num == 2:
self.get_wd_data2()
else:
self.get_wd_data()
def get_dm_num(self):
"""
Show dialog to get user input for which data model to use,
2 or 3.
Set self.data_model_num, and create 3.0 contribution or
2.5 ErMagicBuilder as needed.
"""
ui_dialog = demag_dialogs.user_input(self,['data_model'],
parse_funcs=[float],
heading="Please input prefered data model (2.5,3.0). Note: 2.5 is for legacy projects only, if you have new data OR if you want to upgrade your old data, please use 3.0.",
values=[3])
# figure out where to put this
res = ui_dialog.ShowModal()
vals = ui_dialog.get_values()
self.data_model_num = int(vals[1]['data_model'])
#
if self.data_model_num not in (2, 3):
pw.simple_warning("Input data model not recognized, defaulting to 3")
self.data_model_num = 3
self.set_dm(self.data_model_num)
def set_dm(self, num):
"""
Make GUI changes based on data model num.
Get info from WD in appropriate format
"""
#enable or disable self.btn1a
if self.data_model_num == 3:
self.btn1a.Enable()
else:
self.btn1a.Disable()
#
# set pmag_basic_dialogs
global pmag_basic_dialogs
if self.data_model_num == 2:
pmag_basic_dialogs = pbd2
#wx.CallAfter(self.get_wd_data2)
elif self.data_model_num == 3:
pmag_basic_dialogs = pbd3
#wx.CallAfter(self.get_wd_data)
# do / re-do menubar
menubar = pmag_gui_menu.MagICMenu(self, data_model_num=self.data_model_num)
self.SetMenuBar(menubar)
def get_wd_data(self):
"""
Show dialog to get user input for which directory
to set as working directory.
"""
wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
#wx.Yield()
print('-I- Read in any available data from working directory')
self.contribution = nb.Contribution(self.WD, dmodel=self.data_model)
del wait
def get_wd_data2(self):
wait = wx.BusyInfo('Reading in data from current working directory, please wait...')
#wx.Yield()
print('-I- Read in any available data from working directory (data model 2)')
self.er_magic = builder.ErMagicBuilder(self.WD,
data_model=self.data_model)
del wait
def InitUI(self):
menubar = pmag_gui_menu.MagICMenu(self, data_model_num=self.data_model_num)
self.SetMenuBar(menubar)
#pnl = self.panel
#---sizer logo ----
#start_image = wx.Image("/Users/ronshaar/PmagPy/images/logo2.png")
#start_image = wx.Image("/Users/Python/simple_examples/001.png")
#start_image.Rescale(start_image.GetWidth(), start_image.GetHeight())
#image = wx.BitmapFromImage(start_image)
#self.logo = wx.StaticBitmap(self.panel, -1, image)
#---sizer 0 ----
bSizer0 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Choose MagIC project directory"), wx.HORIZONTAL)
self.dir_path = wx.TextCtrl(self.panel, id=-1, size=(600,25), style=wx.TE_READONLY)
self.change_dir_button = buttons.GenButton(self.panel, id=-1, label="change directory",size=(-1, -1))
self.change_dir_button.SetBackgroundColour("#F8F8FF")
self.change_dir_button.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_change_dir_button, self.change_dir_button)
bSizer0.Add(self.change_dir_button, wx.ALIGN_LEFT)
bSizer0.AddSpacer(40)
bSizer0.Add(self.dir_path,wx.ALIGN_CENTER_VERTICAL)
# not fully implemented method for saving/reverting WD
# last saved: []
#bSizer0_1 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "Save MagIC project directory in current state or revert to last-saved state" ), wx.HORIZONTAL )
#saved_label = wx.StaticText(self.panel, -1, "Last saved:", (20, 120))
#self.last_saved_time = wx.TextCtrl(self.panel, id=-1, size=(100,25), style=wx.TE_READONLY)
#now = datetime.datetime.now()
#now_string = "{}:{}:{}".format(now.hour, now.minute, now.second)
#self.last_saved_time.write(now_string)
#self.save_dir_button = buttons.GenButton(self.panel, id=-1, label = "save dir", size=(-1, -1))
#self.revert_dir_button = buttons.GenButton(self.panel, id=-1, label = "revert dir", size=(-1, -1))
#self.Bind(wx.EVT_BUTTON, self.on_revert_dir_button, self.revert_dir_button)
#self.Bind(wx.EVT_BUTTON, self.on_save_dir_button, self.save_dir_button)
#bSizer0_1.Add(saved_label, flag=wx.RIGHT, border=10)
#bSizer0_1.Add(self.last_saved_time, flag=wx.RIGHT, border=10)
#bSizer0_1.Add(self.save_dir_button,flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
#bSizer0_1.Add(self.revert_dir_button,wx.ALIGN_LEFT)
#
#---sizer 1 ----
bSizer1 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Import data to working directory"), wx.HORIZONTAL)
text = "1. Convert magnetometer files to MagIC format"
self.btn1 = buttons.GenButton(self.panel, id=-1, label=text,
size=(450, 50), name='step 1')
self.btn1.SetBackgroundColour("#FDC68A")
self.btn1.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_convert_file, self.btn1)
text = "2. (optional) Calculate geographic/tilt-corrected directions"
self.btn2 = buttons.GenButton(self.panel, id=-1, label=text, size=(450, 50), name='step 2')
self.btn2.SetBackgroundColour("#FDC68A")
self.btn2.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_orientation_button, self.btn2)
text = "3. (optional) Add MagIC metadata for uploading data to MagIC "
self.btn3 = buttons.GenButton(self.panel, id=-1, label=text, size=(450, 50), name='step 3')
self.btn3.SetBackgroundColour("#FDC68A")
self.btn3.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_er_data, self.btn3)
text = "Unpack txt file downloaded from MagIC"
self.btn4 = buttons.GenButton(self.panel, id=-1, label=text, size=(330, 50))
self.btn4.SetBackgroundColour("#FDC68A")
self.btn4.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_unpack, self.btn4)
text = "Convert directory to 3.0. format (legacy data only)"
self.btn1a = buttons.GenButton(self.panel, id=-1, label=text,
size=(330, 50), name='step 1a')
self.btn1a.SetBackgroundColour("#FDC68A")
self.btn1a.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_convert_3, self.btn1a)
#str = "OR"
OR = wx.StaticText(self.panel, -1, "or", (20, 120))
font = wx.Font(18, wx.SWISS, wx.NORMAL, wx.NORMAL)
OR.SetFont(font)
#bSizer0.Add(self.panel,self.btn1,wx.ALIGN_TOP)
bSizer1_1 = wx.BoxSizer(wx.VERTICAL)
bSizer1_1.AddSpacer(20)
bSizer1_1.Add(self.btn1, wx.ALIGN_TOP)
bSizer1_1.AddSpacer(20)
bSizer1_1.Add(self.btn2, wx.ALIGN_TOP)
bSizer1_1.AddSpacer(20)
bSizer1_1.Add(self.btn3, wx.ALIGN_TOP)
bSizer1_1.AddSpacer(20)
bSizer1.Add(bSizer1_1, wx.ALIGN_CENTER, wx.EXPAND)
bSizer1.AddSpacer(20)
bSizer1.Add(OR, 0, wx.ALIGN_CENTER, 0)
bSizer1.AddSpacer(20)
bSizer1_2 = wx.BoxSizer(wx.VERTICAL)
spacing = 60 #if self.data_model_num == 3 else 90
bSizer1_2.AddSpacer(spacing)
bSizer1_2.Add(self.btn4, 0, wx.ALIGN_CENTER, 0)
bSizer1_2.AddSpacer(20)
bSizer1_2.Add(self.btn1a, 0, wx.ALIGN_CENTER, 0)
bSizer1_2.AddSpacer(20)
bSizer1.Add(bSizer1_2)
bSizer1.AddSpacer(20)
#---sizer 2 ----
bSizer2 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Analysis and plots" ), wx.HORIZONTAL)
text = "Demag GUI"
self.btn_demag_gui = buttons.GenButton(self.panel, id=-1, label=text, size=(300, 50), name='demag gui')
self.btn_demag_gui.SetBackgroundColour("#6ECFF6")
self.btn_demag_gui.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_run_demag_gui, self.btn_demag_gui)
text = "Thellier GUI"
self.btn_thellier_gui = buttons.GenButton(self.panel, id=-1, label=text, size=(300, 50), name='thellier gui')
self.btn_thellier_gui.SetBackgroundColour("#6ECFF6")
self.btn_thellier_gui.InitColours()
self.Bind(wx.EVT_BUTTON, self.on_run_thellier_gui, self.btn_thellier_gui)
bSizer2.AddSpacer(20)
bSizer2.Add(self.btn_demag_gui, 0, wx.ALIGN_CENTER, 0)
bSizer2.AddSpacer(20)
bSizer2.Add(self.btn_thellier_gui, 0, wx.ALIGN_CENTER, 0)
bSizer2.AddSpacer(20)
#---sizer 3 ----
bSizer3 = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY, "Create file for upload to MagIC database"), wx.HORIZONTAL)
text = "Create MagIC txt file for upload"
self.btn_upload = buttons.GenButton(self.panel, id=-1, label=text, size=(300, 50))
self.btn_upload.SetBackgroundColour("#C4DF9B")
self.btn_upload.InitColours()
bSizer3.AddSpacer(20)
bSizer3.Add(self.btn_upload, 0, wx.ALIGN_CENTER, 0)
bSizer3.AddSpacer(20)
self.Bind(wx.EVT_BUTTON, self.on_btn_upload, self.btn_upload)
#---arange sizers ----
hbox = wx.BoxSizer(wx.HORIZONTAL)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(5)
#vbox.Add(self.logo,0,wx.ALIGN_CENTER,0)
vbox.AddSpacer(5)
vbox.Add(bSizer0, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
#vbox.Add(bSizer0_1, 0, wx.ALIGN_CENTER, 0)
#vbox.AddSpacer(10)
vbox.Add(bSizer1, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
vbox.Add(bSizer2, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
vbox.Add(bSizer3, 0, wx.ALIGN_CENTER, 0)
vbox.AddSpacer(10)
hbox.AddSpacer(10)
hbox.Add(vbox, 0, wx.ALIGN_CENTER, 0)
hbox.AddSpacer(5)
self.panel.SetSizer(hbox)
hbox.Fit(self)
#----------------------------------------------------------------------
def get_DIR(self):
"""
Choose a working directory dialog
"""
if "-WD" in sys.argv and self.FIRST_RUN:
ind = sys.argv.index('-WD')
self.WD = os.path.abspath(sys.argv[ind+1])
os.chdir(self.WD)
self.WD = os.getcwd()
self.dir_path.SetValue(self.WD)
else:
self.on_change_dir_button(None)
#self.WD = os.getcwd()
self.FIRST_RUN = False
# this functionality is not fully working yet, so I've removed it for now
#try:
# print "trying listdir"
# os.listdir(self.WD)
#except Exception as ex:
# print ex
#print "self.WD.split('/')", self.WD.split('/')
#if len(self.WD.split('/')) <= 4:
# print "no to saving this directory"
#else:
# print "do on_save_dir_button"
# self.on_save_dir_button(None)
#----------------------------------------------------------------------
#def getFolderBitmap():
# img = folder_icon.GetImage().Rescale(50, 50)
# return img.ConvertToBitmap()
def on_change_dir_button(self, event, show=True):
currentDirectory = os.getcwd()
self.change_dir_dialog = wx.DirDialog(self.panel, "Choose your working directory to create or edit a MagIC contribution:", defaultPath=currentDirectory, style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
if show:
self.on_finish_change_dir(self.change_dir_dialog)
def on_finish_change_dir(self, dialog, show=True):
if not show:
self.WD = dialog.GetPath()
os.chdir(self.WD)
self.dir_path.SetValue(self.WD)
elif dialog.ShowModal() == wx.ID_OK:
self.WD = dialog.GetPath()
os.chdir(self.WD)
self.dir_path.SetValue(self.WD)
dialog.Destroy()
if self.data_model_num == 2:
self.get_wd_data2()
else:
self.get_wd_data()
else:
dialog.Destroy()
# def on_revert_dir_button(self, event):
# if self.last_saved_time.GetLineText(0) == "not saved":
# dia = wx.MessageDialog(self.panel, "You can't revert, because your working directory has not been saved. Are you sure you're in the right directory?", "Can't be done", wx.OK)
# dia.ShowModal()
# return
# dia = wx.MessageDialog(self.panel, "Are you sure you want to revert to the last saved state? All changes since {} will be lost".format(self.last_saved_time.GetLineText(0)), "Not so fast", wx.YES_NO|wx.NO_DEFAULT)
# ok = dia.ShowModal()
# if ok == wx.ID_YES:
# os.chdir('..')
# wd = self.WD
# shutil.rmtree(wd)
# shutil.move(self.saved_dir, self.WD)
# os.chdir(self.WD)
# self.on_save_dir_button(None)
# else:
# print "-I Don't revert"
# def on_save_dir_button(self, event):
# try:
# if len(self.WD.split('/')) <= 4:
# self.last_saved_time.Clear()
# self.last_saved_time.write("not saved")
# return
# os.chdir('..')
# wd = self.WD
# wd = wd.rstrip('/')
# ind = wd.rfind('/') + 1
# saved_prefix, saved_folder = wd[:ind], wd[ind:]
# self.saved_dir = saved_prefix + "copy_" + saved_folder
# if "copy_" + saved_folder in os.listdir(saved_prefix):
# shutil.rmtree(self.saved_dir)
# shutil.copytree(self.WD, self.saved_dir)
# self.last_saved_time.Clear()
# now = datetime.datetime.now()
# now_string = "{}:{}:{}".format(now.hour, now.minute, now.second)
# self.last_saved_time.write(now_string)
# os.chdir(self.WD)
# except:# OSError:
# print "-I Problem copying working directory"
# self.last_saved_time.Clear()
# self.last_saved_time.write("not saved")
def on_run_thellier_gui(self, event):
outstring = "thellier_gui.py -WD %s"%self.WD
print("-I- running python script:\n %s"%(outstring))
if self.data_model_num == 2.5:
thellier_gui.main(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
else:
# disable and hide Pmag GUI mainframe
self.Disable()
self.Hide()
# show busyinfo
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.Yield()
# create custom Thellier GUI closing event and bind it
ThellierGuiExitEvent, EVT_THELLIER_GUI_EXIT = newevent.NewCommandEvent()
self.Bind(EVT_THELLIER_GUI_EXIT, self.on_analysis_gui_exit)
# make and show the Thellier GUI frame
thellier_gui_frame = thellier_gui.Arai_GUI(self.WD, self,
standalone=False,
DM=self.data_model_num,
evt_quit=ThellierGuiExitEvent)
            if not thellier_gui_frame: print("Thellier GUI failed to start; aborting"); del wait; return
thellier_gui_frame.Centre()
thellier_gui_frame.Show()
del wait
def on_run_demag_gui(self, event):
outstring = "demag_gui.py -WD %s"%self.WD
print("-I- running python script:\n %s"%(outstring))
if self.data_model_num == 2:
demag_gui.start(self.WD, standalone_app=False, parent=self, DM=self.data_model_num)
else:
# disable and hide Pmag GUI mainframe
self.Disable()
self.Hide()
# show busyinfo
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.Yield()
# create custom Demag GUI closing event and bind it
DemagGuiExitEvent, EVT_DEMAG_GUI_EXIT = newevent.NewCommandEvent()
self.Bind(EVT_DEMAG_GUI_EXIT, self.on_analysis_gui_exit)
# make and show the Demag GUI frame
demag_gui_frame = demag_gui.Demag_GUI(self.WD, self,
write_to_log_file=False,
data_model=self.data_model_num,
evt_quit=DemagGuiExitEvent)
demag_gui_frame.Centre()
demag_gui_frame.Show()
del wait
def on_analysis_gui_exit(self, event):
"""
When Thellier or Demag GUI closes,
show and enable Pmag GUI main frame.
Read in an updated contribution object
based on any changed files.
(For Pmag GUI 3.0 only)
"""
self.Enable()
self.Show()
# also, refresh contribution object based on files
# that may have been written/overwritten by Thellier GUI
self.get_wd_data()
def on_convert_file(self, event):
pmag_dialogs_dia = pmag_basic_dialogs.import_magnetometer_data(self, wx.ID_ANY, '', self.WD)
pmag_dialogs_dia.Show()
pmag_dialogs_dia.Center()
self.Hide()
def on_convert_3(self, event):
# turn files from 2.5 --> 3.0 (rough translation)
meas, upgraded, no_upgrade = pmag.convert_directory_2_to_3('magic_measurements.txt',
input_dir=self.WD, output_dir=self.WD,
data_model=self.contribution.data_model)
if not meas:
wx.MessageBox('2.5 --> 3.0 failed. Do you have a magic_measurements.txt file in your working directory?',
'Info', wx.OK | wx.ICON_INFORMATION)
return
# create a contribution
self.contribution = nb.Contribution(self.WD)
# make skeleton files with specimen, sample, site, location data
self.contribution.propagate_measurement_info()
#
# note what DIDN'T upgrade
#no_upgrade = []
#for fname in os.listdir(self.WD):
# if 'rmag' in fname:
# no_upgrade.append(fname)
# elif fname in ['pmag_results.txt', 'pmag_criteria.txt',
# 'er_synthetics.txt', 'er_images.txt',
# 'er_plots.txt', 'er_ages.txt']:
# no_upgrade.append(fname)
# pop up
upgraded_string = ", ".join(upgraded)
if no_upgrade:
no_upgrade_string = ", ".join(no_upgrade)
msg = '2.5 --> 3.0 translation completed!\n\nThese 3.0 format files were created: {}.\n\nHowever, these 2.5 format files could not be upgraded: {}.\n\nTo convert all 2.5 files, use the MagIC upgrade tool: https://www2.earthref.org/MagIC/upgrade\n'.format(upgraded_string, no_upgrade_string)
if 'criteria.txt' in upgraded:
msg += '\nNote: Please check your criteria file for completeness and accuracy, as not all 2.5 files will be fully upgraded.'
if 'pmag_criteria.txt' in no_upgrade:
msg += '\nNote: Not all criteria files can be upgraded, even on the MagIC site. You may need to recreate an old pmag_criteria file from scratch in Thellier GUI or Demag GUI.'
wx.MessageBox(msg, 'Warning', wx.OK | wx.ICON_INFORMATION)
else:
msg = '2.5 --> 3.0 translation completed!\nThese files were converted: {}'.format(upgraded_string)
wx.MessageBox(msg, 'Info', wx.OK | wx.ICON_INFORMATION)
def on_er_data(self, event):
if self.data_model_num == 2:
if not os.path.isfile(os.path.join(self.WD, 'magic_measurements.txt')):
print('-W- {} is missing'.format(os.path.join(self.WD, 'magic_measurements.txt')))
pw.simple_warning("Your working directory must have a magic_measurements.txt file to run this step. Make sure you have fully completed step 1 (import magnetometer file), by combining all imported magnetometer files into one magic_measurements file.")
return False
#self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder(self.WD, self, self.ErMagic_data)#,self.Data,self.Data_hierarchy)
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.Yield()
self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder(self.WD, self, self.er_magic)#,self.Data,self.Data_hierarchy)
elif self.data_model_num == 3:
if not os.path.isfile(os.path.join(self.WD, 'measurements.txt')):
                pw.simple_warning("Your working directory must have a 3.0 format measurements.txt file to run this step. Make sure you have fully completed step 1 (import magnetometer file) and ALSO converted to 3.0 (if necessary), then try again.")
return False
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.Yield()
self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder3(self.WD, self, self.contribution)
self.ErMagic_frame.Show()
self.ErMagic_frame.Center()
size = wx.DisplaySize()
        size = (size[0] - 0.3 * size[0], size[1] - 0.3 * size[1]) # gets total available screen space - 30%
self.ErMagic_frame.Raise()
del wait
def init_check_window(self):
self.check_dia = pmag_er_magic_dialogs.ErMagicCheckFrame(self, 'Check Data', self.WD, self.er_magic)# initiates the object that will control steps 1-6 of checking headers, filling in cell values, etc.
def init_check_window3(self):
self.check_dia = pmag_er_magic_dialogs.ErMagicCheckFrame3(self, 'Check Data', self.WD, self.contribution)
def on_orientation_button(self, event):
wait = wx.BusyInfo('Compiling required data, please wait...')
wx.Yield()
#dw, dh = wx.DisplaySize()
size = wx.DisplaySize()
size = (size[0]-0.1 * size[0], size[1]-0.1 * size[1])
if self.data_model_num == 3:
frame = pmag_basic_dialogs.OrientFrameGrid3(self, -1, 'demag_orient.txt',
self.WD, self.contribution,
size)
else:
frame = pmag_basic_dialogs.OrientFrameGrid(self, -1, 'demag_orient.txt',
self.WD, self.er_magic, size)
frame.Show(True)
frame.Centre()
self.Hide()
del wait
def on_unpack(self, event):
dlg = wx.FileDialog(
None, message = "choose txt file to unpack",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN #| wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
input_dir, f = os.path.split(FILE)
else:
return False
outstring="download_magic.py -f {} -WD {} -ID {}".format(f, self.WD, input_dir)
# run as module:
print("-I- running python script:\n %s"%(outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.Yield()
ex = None
try:
if ipmag.download_magic(f, self.WD, input_dir, overwrite=True):
text = "Successfully ran download_magic.py program.\nMagIC files were saved in your working directory.\nSee Terminal/message window for details."
else:
text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
except Exception as ex:
text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
del wait
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
if ex:
raise(ex)
def on_btn_upload(self, event):
outstring="upload_magic.py"
print("-I- running python script:\n %s"%(outstring))
wait = wx.BusyInfo("Please wait, working...")
wx.Yield()
if self.data_model_num == 3:
res, error_message, has_problems, all_failing_items = ipmag.upload_magic3(dir_path=self.WD,
vocab=self.contribution.vocab,
contribution=self.contribution)
if self.data_model_num == 2:
res, error_message, errors = ipmag.upload_magic(dir_path=self.WD, data_model=self.er_magic.data_model)
del wait
if res:
            text = "You are ready to upload.\nYour file: {} was generated in your MagIC Project Directory.\nDrag and drop this file into the MagIC database.".format(os.path.split(res)[1])
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
else:
text = "There were some problems with the creation of your upload file.\nError message: {}\nSee Terminal/message window for details".format(error_message)
dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
if self.data_model_num == 3:
from programs import magic_gui
self.Disable()
self.Hide()
self.magic_gui_frame = magic_gui.MainFrame(self.WD,
dmodel=self.data_model,
title="Validations",
contribution=self.contribution)
self.magic_gui_frame.validation_mode = ['specimens']
self.magic_gui_frame.failing_items = all_failing_items
self.magic_gui_frame.change_dir_button.Disable()
self.magic_gui_frame.Centre()
self.magic_gui_frame.Show()
self.magic_gui_frame.highlight_problems(has_problems)
#
# change name of upload button to 'exit validation mode'
self.magic_gui_frame.bSizer2.GetStaticBox().SetLabel('return to main GUI')
self.magic_gui_frame.btn_upload.SetLabel("exit validation mode")
# bind that button to quitting magic gui and re-enabling Pmag GUI
self.magic_gui_frame.Bind(wx.EVT_BUTTON, self.on_end_validation, self.magic_gui_frame.btn_upload)
def on_end_validation(self, event):
self.Enable()
self.Show()
self.magic_gui_frame.Destroy()
def on_menu_exit(self, event):
# also delete appropriate copy file
try:
self.help_window.Destroy()
except:
pass
if '-i' in sys.argv:
self.Destroy()
try:
sys.exit() # can raise TypeError if wx inspector was used
except Exception as ex:
if isinstance(ex, TypeError):
pass
else:
raise ex
def main():
if '-h' in sys.argv:
print("See https://earthref.org/PmagPy/cookbook/#pmag_gui.py for a complete tutorial")
sys.exit()
print('-I- Starting Pmag GUI - please be patient')
# if redirect is true, wxpython makes its own output window for stdout/stderr
    app = wx.App(redirect=False)
app.frame = MagMainFrame()
working_dir = pmag.get_named_arg_from_sys('-WD', '.')
## this causes an error with Canopy Python
## (it works with brew Python)
## need to use these lines for Py2app
#if working_dir == '.':
# app.frame.on_change_dir_button(None)
app.frame.Show()
app.frame.Center()
## use for debugging:
#if '-i' in sys.argv:
# import wx.lib.inspection
# wx.lib.inspection.InspectionTool().Show()
app.MainLoop()
if __name__ == "__main__":
main()
| bsd-3-clause |
adamcandy/QGIS-Meshing | extras/shape/displayShapefileMesh.py | 3 | 1676 | import shapefile
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, [email protected]
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
import matplotlib.pyplot as pyplot
import sys
sf = shapefile.Reader(sys.argv[1])
shapes = sf.shapes()
print(shapes[0].shapeType)
#print(shapes[0].points)
print(shapes[0].parts)
print(shapes[0].points)
i = -1
for s in shapes:
points = s.points
x = []
y = []
print("shp start")
for p in points:
i+=1
print("%d-->%s"%(i,p))
x.append(p[0])
y.append(p[1])
pyplot.plot(x,y)
#pyplot.xlim(-1,5)
#pyplot.ylim(-1,5)
pyplot.show()
| lgpl-2.1 |
Adai0808/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
gfrubi/GR | figuras-editables/fig-Lane_Emden.py | 4 | 1688 | # -*- coding: utf-8 -*-
from matplotlib.pyplot import *
from numpy import *
from scipy.integrate import odeint, quad
import matplotlib.pyplot as plt
style.use('classic')
def dtheta(theta, x, n):
if modf(n)[0] == 0.0:
return(theta[1], -2*theta[1]/x-(theta[0])**n)
else:
if theta[0] < 0.0 :
return(theta[1], -2*theta[1]/x+(abs(theta[0]))**n)
else:
return(theta[1], -2*theta[1]/x-(theta[0])**n)
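# dtheta above encodes the Lane-Emden equation
#   theta'' + (2/x)*theta' + theta**n = 0
# as a first-order system with theta[0] = theta and theta[1] = theta', i.e.
#   theta[0]' = theta[1],    theta[1]' = -2*theta[1]/x - theta[0]**n
# For non-integer n, the abs() branch switches to |theta|**n once theta drops
# below zero, which avoids complex-valued powers.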
theta0 = [1.0, 0.0]
x = linspace(1.0e-30, 35.0, 1000000)
enes = [0.,1.,1.5,3.,5.]
Thetas = []
#raices=zeros(len(enes))
for i in range(len(enes)):
sol = odeint(dtheta, theta0, x, args=(enes[i],))
    if len(where(sol[:,0] < 0)[0]) != 0:
        pos = (where(sol[:,0] < 0)[0][0])-1  # has some NaNs, which is why it breaks otherwise
    elif len(where(isnan(sol[:,0]))[0]) != 0:
pos = where(isnan(sol[:,0])==True)[0][0]-1
else:
pos = len(x)
    # thetapp=dtheta([sol[pos,0], sol[pos,1]],x[pos+1],enes[i])[1] # second derivative at the last position
# x1 = x[pos] - sol[pos,1]/thetapp - sqrt(sol[pos,1]**2-2*sol[pos,0]*thetapp)/thetapp
# raices[i]=x1
Thetas.append(sol[:pos,0])
colores=['blue','red','brown','purple','black']
dasheses=[[],[5,2],[5,5],[5,2,2,2],[2,2]]
fig, axes = plt.subplots(figsize=(8,6))
for i in range(len(enes)):
axes.plot(x[:len(Thetas[i])], Thetas[i], colores[i], dashes=dasheses[i], label='$n = %1.1f$'%enes[i], linewidth=1.50)
axes.legend(loc='best')
#axes.set_title(u'Lane-Emden functions for different values of $n$')
axes.set_xlabel('$x$', fontsize=15)
axes.set_ylabel('$\Theta(x)$', fontsize=15)
axes.set_xlim(0,8)
axes.set_ylim(0,1)
axes.grid()
fig.savefig('../fig/fig-Lane-Emden.pdf')
#fig.show()
| gpl-3.0 |
aflaxman/scikit-learn | sklearn/svm/tests/test_svm.py | 33 | 35916 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings, assert_raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import six
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_true(hasattr(clf, "coef_") == (k == 'linear'))
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [-1, -1, -1])
assert_equal(pred.dtype, np.dtype('intp'))
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(AttributeError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape of decision_function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError on clf.fit
if six.PY2:
# Test unicode (same as str on python3)
clf = svm.SVC(kernel=unicode('linear'))
clf.fit(X, Y)
# Test ascii bytes (str is bytes in python2)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
else:
# Test unicode (str is unicode in python3)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
# Test ascii bytes (same as str on python2)
clf = svm.SVC(kernel=bytes('linear', 'ascii'))
clf.fit(X, Y)
# Test default behavior on both versions
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
# Check if Upper case notation raises error at _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
def test_ovr_decision_function():
# One point from each quadrant represents one class
X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
y_train = [0, 1, 2, 3]
# First point is closer to the decision boundaries than the second point
base_points = np.array([[5, 5], [10, 10]])
# For all the quadrants (classes)
X_test = np.vstack((
base_points * [1, 1], # Q1
base_points * [-1, 1], # Q2
base_points * [-1, -1], # Q3
base_points * [1, -1] # Q4
))
y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Test if the prediction is the same as y
assert_array_equal(y_pred, y_test)
deci_val = clf.decision_function(X_test)
# Assert that the predicted class has the maximum value
assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
# Get decision value at test points for the predicted class
pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
# Assert pred_class_deci_val > 0 here
assert_greater(np.min(pred_class_deci_val), 0.0)
# Test if the first point has lower decision value on every quadrant
# compared to the second point
assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))
| bsd-3-clause |
Elucidation/ChessboardDetect | centralSymmetryTile.py | 1 | 3522 | # coding=utf-8
import PIL.Image
import matplotlib.image as mpimg
import scipy.ndimage
import cv2 # For Sobel etc
import glob
import numpy as np
import matplotlib.pyplot as plt
from random import shuffle
import os
np.set_printoptions(suppress=True, linewidth=200) # Better printing of arrays
def getRingIndices(radius):
# Bottom
row1 = np.ones(radius*2+1, dtype=int)*radius
col1 = np.arange(radius*2+1)-radius
# Right
row2 = -np.arange(1,radius*2+1)+radius
col2 = np.ones(radius*2, dtype=int)*radius
# Top
row3 = -np.ones(radius*2, dtype=int)*radius
col3 = -np.arange(1,radius*2+1)+radius
# Left
row4 = np.arange(1,radius*2+1-1)-radius
col4 = -np.ones(radius*2-1, dtype=int)*radius
rows = np.hstack([row1, row2, row3, row4])
cols = np.hstack([col1, col2, col3, col4])
return (rows,cols)
def countSteps(ring):
# Build a big ring so we can handle circular edges
bigring = np.hstack([ring,ring,ring])
n = len(ring)
# Go through middle portion of ring
count = 0
for i in (np.arange(n) + n):
if (bigring[i] != bigring[i-1] and (bigring[i-1] == bigring[i-2]) and (bigring[i] == bigring[i+1])):
count += 1
return count
# Load a tile image and check the central symmetry around a ring
def main():
bad_tile_filepaths = sorted(glob.glob('dataset_binary_5/bad/img_*.png'))
good_tile_filepaths = sorted(glob.glob('dataset_binary_5/good/img_*.png'))
# shuffle(bad_tile_filepaths)
# shuffle(good_tile_filepaths)
# Setup
tile_radius = (PIL.Image.open(good_tile_filepaths[0]).size[0]-1)/2 #(img.shape[0]-1)/2
radius = 5
# filepath = 'dataset_binary_5/bad/img_01_008.png'
# plt.figure(figsize=(20,20))
# plt.subplot(121)
# plt.title('False Positives')
rows, cols = getRingIndices(radius)
# Center in tile
rows += tile_radius
cols += tile_radius
# for i in range(20):
# filepath = bad_tile_filepaths[i]
# img = PIL.Image.open(filepath).convert('L')
# img = np.array(img)
# # img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
# ring = img[rows,cols]
# plt.plot(ring + i*255*2, '.-')
# plt.plot([0,len(ring)-1], np.ones(2) + 127 + i*255*2, 'k:', alpha=0.2)
# plt.text(0, i*255*2, countSteps(ring))
# # Good tiles
# plt.subplot(122)
# plt.title('True Positives')
# for i in range(20):
# filepath = good_tile_filepaths[i]
# img = PIL.Image.open(filepath).convert('L')
# img = np.array(img)
# # img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
# ring = img[rows,cols]
# plt.plot(ring + i*255*2, '.-')
# plt.plot([0,len(ring)-1], np.ones(2) + 127 + i*255*2, 'k:', alpha=0.2)
# plt.text(0, i*255*2, countSteps(ring))
# plt.show()
good_steps = []
bad_steps = []
for i in range(len(bad_tile_filepaths)):
filepath = bad_tile_filepaths[i]
img = PIL.Image.open(filepath).convert('L')
img = np.array(img)
ring = img[rows,cols]
steps = countSteps(ring)
bad_steps.append(steps)
for i in range(len(good_tile_filepaths)):
filepath = good_tile_filepaths[i]
img = PIL.Image.open(filepath).convert('L')
img = np.array(img)
ring = img[rows,cols]
steps = countSteps(ring)
good_steps.append(steps)
# print(good_steps)
# print(bad_steps)
plt.subplot(121)
plt.hist(bad_steps)
plt.title('False Positives')
plt.subplot(122)
plt.hist(good_steps)
plt.title('True Positives')
plt.show()
if __name__ == '__main__':
main()
| mit |
valexandersaulys/prudential_insurance_kaggle | read_in.py | 1 | 5285 | """
My draft for reading in the code contained in the csv files.
Medical_Keyword_1-48 are dummy variables.
"""
import pandas as pd
import numpy as np
def get_data():
# Hardcoding in the paths here
TRAIN_PATH = "./train.csv"
TEST_PATH = "./test.csv"
# Import via pandas
old_train = pd.read_csv(TRAIN_PATH)
old_test = pd.read_csv(TEST_PATH)
converted_train_list = []
converted_test_list = [] # I will later use pandas to get a full df
# Make lists for conversions
categorical_data_list = [ "Product_Info_1", "Product_Info_2",
"Product_Info_3", "Product_Info_5",
"Product_Info_6", "Product_Info_7",
"Employment_Info_2", "Employment_Info_3",
"Employment_Info_5", "InsuredInfo_1",
"InsuredInfo_2", "InsuredInfo_3",
"InsuredInfo_4", "InsuredInfo_5",
"InsuredInfo_6", "InsuredInfo_7", "Insurance_History_1",
"Insurance_History_2", "Insurance_History_3",
"Insurance_History_4", "Insurance_History_7",
"Insurance_History_8", "Insurance_History_9",
"Family_Hist_1", "Medical_History_2",
"Medical_History_3", "Medical_History_4",
"Medical_History_5", "Medical_History_6",
"Medical_History_7", "Medical_History_8",
"Medical_History_9", "Medical_History_11",
"Medical_History_12", "Medical_History_13",
"Medical_History_14", "Medical_History_16",
"Medical_History_17", "Medical_History_18",
"Medical_History_19", "Medical_History_20",
"Medical_History_21", "Medical_History_22",
"Medical_History_23", "Medical_History_25",
"Medical_History_26", "Medical_History_27",
"Medical_History_28", "Medical_History_29",
"Medical_History_30", "Medical_History_31",
"Medical_History_33", "Medical_History_34",
"Medical_History_35", "Medical_History_36",
"Medical_History_37", "Medical_History_38",
"Medical_History_39", "Medical_History_40",
"Medical_History_41" ]
continuous_data_list = [ "Product_Info_4", "Ins_Age", "Ht", "Wt", "BMI",
"Employment_Info_1", "Employment_Info_4",
"Employment_Info_6", "Insurance_History_5",
"Family_Hist_2", "Family_Hist_3",
"Family_Hist_4", "Family_Hist_5" ]
discrete_data_list = [ "Medical_History_1", "Medical_History_10",
"Medical_History_15", "Medical_History_24",
"Medical_History_32" ]
# Convert categorical data use pandas get_dummies
for category in categorical_data_list:
# First for training
dummies = pd.get_dummies(old_train[category], dummy_na=False)
converted_train_list.append(dummies)
# Then for testing
dummies = pd.get_dummies(old_test[category], dummy_na=False)
converted_test_list.append(dummies)
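    # Note that get_dummies one-hot encodes each categorical column separately
    # for train and test, so a level seen in only one of the two frames yields
    # a dummy column that exists on one side only (see the error noted near
    # the bottom of this function).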
# Convert continuous data to float32
df = old_train[continuous_data_list].convert_objects(convert_numeric=True)
tf = old_test[continuous_data_list].convert_objects(convert_numeric=True)
# I don't know how appending a list of dataframes will work, should be fine
converted_train_list.append(df); converted_test_list.append(tf)
# Convert Discrete data to variables (don't know how it really looks atm)
for category in discrete_data_list:
# First for training
dummies = pd.get_dummies(old_train[category], dummy_na=False)
converted_train_list.append(dummies)
# Then for testing
dummies = pd.get_dummies(old_test[category], dummy_na=False)
converted_test_list.append(dummies)
# Make the full dataframes here
train = pd.concat(converted_train_list,axis=1)
test = pd.concat(converted_test_list,axis=1)
# So far I've made the assumption that there are no new variables or
# features in the test dataset vs. the train dataset. This will rectify that
columns_to_keep = list(train.columns.values)
""" Prints for Debugging """
#print list(train.columns.values)
#print list(test.columns.values)
print train.columns
print test.columns
# Get the y_data bits
y_train = old_train["Response"]
test_id = old_test["Id"]
# To Return
x_train = train[columns_to_keep];
x_test = test[columns_to_keep];
# Returning an error:
# IndexError: index 4540 is out of bounds for axis 1 with size 1679
# Return everything
return x_train, y_train, x_test, test_id;
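# Hypothetical usage (assumes train.csv and test.csv sit in the working
# directory, as hardcoded above):
#
# x_train, y_train, x_test, test_id = get_data()
# print x_train.shape, y_train.shape, x_test.shape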
| gpl-2.0 |
Jul13/wepy | wepy/io/sp500.py | 1 | 1798 | # Author: Gheorghe Postelnicu
import os
from datetime import datetime, date
from bs4 import BeautifulSoup
import pandas as pd
import urllib.request as urllib2
from topyc.util.file import latest_filename
SITE = "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
def store_snapshot(base_dir):
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(SITE, headers=hdr)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page, 'html5lib')
table = soup.find('table', {'class': 'wikitable sortable'})
sectors = []
subindustries = []
tickers = []
dates = []
for row in table.findAll('tr'):
col = row.findAll('td')
if len(col) > 0:
sector = str(col[3].string.strip()).lower().replace(' ', '_')
subindustry = str(col[4].string.strip()).lower().replace(' ', '_')
date_first_added = None
buf = col[6]
if buf.string:
date_first_added = datetime.strptime(buf.string.strip(), '%Y-%m-%d').date()
ticker = str(col[0].string.strip())
tickers.append(ticker)
sectors.append(sector)
subindustries.append(subindustry)
dates.append(date_first_added)
sp500 = pd.DataFrame({'ticker': tickers, 'sector': sectors, 'subindustry': subindustries,
'date_first_added': dates})
snapshot_file = datetime.today().strftime('%Y%m%d')
out_file = os.path.join(base_dir, '{}.csv'.format(snapshot_file))
sp500.to_csv(out_file, index=False)
def load_latest(base_dir):
df = pd.read_csv(latest_filename('{}/*.csv'.format(base_dir)),
parse_dates=[0]) # Parse date_first_added column.
df.date_first_added.fillna(date(1970, 1, 1), inplace=True)
return df
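# Illustrative usage; the directory name here is an assumption, not part of
# this module:
#
# store_snapshot('sp500_snapshots') # writes e.g. sp500_snapshots/20200101.csv
# sp500 = load_latest('sp500_snapshots') # most recent snapshot as a DataFrame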
| apache-2.0 |
shikhar413/openmc | tests/regression_tests/tally_slice_merge/test.py | 8 | 6593 | import hashlib
import itertools
import openmc
from tests.testing_harness import PyAPITestHarness
class TallySliceMergeTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Define nuclides and scores to add to both tallies
self.nuclides = ['U235', 'U238']
self.scores = ['fission', 'nu-fission']
# Define filters for energy and spatial domain
low_energy = openmc.EnergyFilter([0., 0.625])
high_energy = openmc.EnergyFilter([0.625, 20.e6])
merged_energies = low_energy.merge(high_energy)
cell_21 = openmc.CellFilter(21)
cell_27 = openmc.CellFilter(27)
distribcell_filter = openmc.DistribcellFilter(21)
mesh = openmc.RegularMesh(name='mesh')
mesh.dimension = [2, 2]
mesh.lower_left = [-50., -50.]
mesh.upper_right = [+50., +50.]
mesh_filter = openmc.MeshFilter(mesh)
self.cell_filters = [cell_21, cell_27]
self.energy_filters = [low_energy, high_energy]
# Initialize cell tallies with filters, nuclides and scores
tallies = []
for energy_filter in self.energy_filters:
for cell_filter in self.cell_filters:
for nuclide in self.nuclides:
for score in self.scores:
tally = openmc.Tally()
tally.estimator = 'tracklength'
tally.scores.append(score)
tally.nuclides.append(nuclide)
tally.filters.append(cell_filter)
tally.filters.append(energy_filter)
tallies.append(tally)
# Merge all cell tallies together
while len(tallies) != 1:
halfway = len(tallies) // 2
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
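        # (Added note) Each pass of the loop above zips the first half of the
        # tally list against the second half and merges element-wise, halving
        # the list each iteration; the 16 single-filter tallies built above
        # therefore collapse into one merged tally after four passes.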
# Specify a name for the tally
tallies[0].name = 'cell tally'
# Initialize a distribcell tally
distribcell_tally = openmc.Tally(name='distribcell tally')
distribcell_tally.estimator = 'tracklength'
distribcell_tally.filters = [distribcell_filter, merged_energies]
for score in self.scores:
distribcell_tally.scores.append(score)
for nuclide in self.nuclides:
distribcell_tally.nuclides.append(nuclide)
mesh_tally = openmc.Tally(name='mesh tally')
mesh_tally.estimator = 'tracklength'
mesh_tally.filters = [mesh_filter, merged_energies]
mesh_tally.scores = self.scores
mesh_tally.nuclides = self.nuclides
# Add tallies to a Tallies object
self._model.tallies = [tallies[0], distribcell_tally, mesh_tally]
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the cell tally
tallies = [sp.get_tally(name='cell tally')]
# Slice the tallies by cell filter bins
cell_filter_prod = itertools.product(tallies, self.cell_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].bins[0],)]),
cell_filter_prod)
# Slice the tallies by energy filter bins
energy_filter_prod = itertools.product(tallies, self.energy_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].bins[0],)]),
energy_filter_prod)
# Slice the tallies by nuclide
nuclide_prod = itertools.product(tallies, self.nuclides)
tallies = map(lambda tn: tn[0].get_slice(nuclides=[tn[1]]), nuclide_prod)
# Slice the tallies by score
score_prod = itertools.product(tallies, self.scores)
tallies = map(lambda ts: ts[0].get_slice(scores=[ts[1]]), score_prod)
tallies = list(tallies)
# Initialize an output string
outstr = ''
# Append sliced Tally Pandas DataFrames to output string
for tally in tallies:
df = tally.get_pandas_dataframe()
outstr += df.to_string()
# Merge all tallies together
while len(tallies) != 1:
            halfway = len(tallies) // 2
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Append merged Tally Pandas DataFrame to output string
df = tallies[0].get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the distribcell tally
distribcell_tally = sp.get_tally(name='distribcell tally')
# Sum up a few subdomains from the distribcell tally
sum1 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[0, 100, 2000, 30000])
# Sum up a few subdomains from the distribcell tally
sum2 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[500, 5000, 50000])
# Merge the distribcell tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the mesh tally
mesh_tally = sp.get_tally(name='mesh tally')
# Sum up a few subdomains from the mesh tally
sum1 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(1, 1), (1, 2)])
# Sum up a few subdomains from the mesh tally
sum2 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(2, 1), (2, 2)])
# Merge the mesh tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_tally_slice_merge():
harness = TallySliceMergeTestHarness('statepoint.10.h5')
harness.main()
| mit |
seaotterman/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 14 | 46097 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
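# (Added note) The 3-tuple returned above -- (predictions, loss, train_op) --
# is the legacy contrib.learn model_fn contract; a ModelFnOps-returning
# variant of the same linear model appears below in
# linear_model_fn_with_model_fn_ops.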
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty list and expect it to remain empty after calling
    # fit and evaluate; the estimator must copy this list internally before
    # appending any hooks of its own.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertSameElements(
['bogus_lookup', 'feature'],
graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS))
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ShanghaiTimes/Audacity2015 | lib-src/portaudio-v19/test/patest_suggested_vs_streaminfo_latency.py | 30 | 5504 | #!/usr/bin/env python
"""
Run and graph the results of patest_suggested_vs_streaminfo_latency.c
Requires matplotlib for plotting: http://matplotlib.sourceforge.net/
"""
import os
from pylab import *
import numpy
from matplotlib.backends.backend_pdf import PdfPages
testExeName = "PATest.exe" # rename to whatever the compiled patest_suggested_vs_streaminfo_latency.c binary is
dataFileName = "patest_suggested_vs_streaminfo_latency.csv" # code below calls the exe to generate this file
inputDeviceIndex = -1 # -1 means default
outputDeviceIndex = -1 # -1 means default
sampleRate = 44100
pdfFilenameSuffix = "_wmme"
pdfFile = PdfPages("patest_suggested_vs_streaminfo_latency_" + str(sampleRate) + pdfFilenameSuffix +".pdf") #output this pdf file
def loadCsvData( dataFileName ):
params= ""
inputDevice = ""
outputDevice = ""
startLines = file(dataFileName).readlines(1024)
for line in startLines:
if "output device" in line:
outputDevice = line.strip(" \t\n\r#")
if "input device" in line:
inputDevice = line.strip(" \t\n\r#")
params = startLines[0].strip(" \t\n\r#")
data = numpy.loadtxt(dataFileName, delimiter=",", skiprows=4).transpose()
class R(object): pass
result = R()
result.params = params
for s in params.split(','):
if "sample rate" in s:
result.sampleRate = s
result.inputDevice = inputDevice
result.outputDevice = outputDevice
result.suggestedLatency = data[0]
result.halfDuplexOutputLatency = data[1]
result.halfDuplexInputLatency = data[2]
result.fullDuplexOutputLatency = data[3]
result.fullDuplexInputLatency = data[4]
return result;
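# (Added note) loadCsvData assumes the CSV body holds five comma-separated
# columns per row -- suggested latency, half-duplex output latency, half-duplex
# input latency, full-duplex output latency, full-duplex input latency --
# matching the data[0]..data[4] unpacking above.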
def setFigureTitleAndAxisLabels( framesPerBufferString ):
title("PortAudio suggested (requested) vs. resulting (reported) stream latency\n" + framesPerBufferString)
ylabel("PaStreamInfo::{input,output}Latency (s)")
xlabel("Pa_OpenStream suggestedLatency (s)")
grid(True)
legend(loc="upper left")
def setDisplayRangeSeconds( maxSeconds ):
xlim(0, maxSeconds)
ylim(0, maxSeconds)
# run the test with different frames per buffer values:
compositeTestFramesPerBufferValues = [0]
# powers of two
for i in range (1,11):
compositeTestFramesPerBufferValues.append( pow(2,i) )
# multiples of 50
for i in range (1,20):
compositeTestFramesPerBufferValues.append( i * 50 )
# 10ms buffer sizes
compositeTestFramesPerBufferValues.append( 441 )
compositeTestFramesPerBufferValues.append( 882 )
# large primes
#compositeTestFramesPerBufferValues.append( 39209 )
#compositeTestFramesPerBufferValues.append( 37537 )
#compositeTestFramesPerBufferValues.append( 26437 )
individualPlotFramesPerBufferValues = [0,64,128,256,512] #output separate plots for these
isFirst = True
for framesPerBuffer in compositeTestFramesPerBufferValues:
commandString = testExeName + " " + str(inputDeviceIndex) + " " + str(outputDeviceIndex) + " " + str(sampleRate) + " " + str(framesPerBuffer) + ' > ' + dataFileName
print commandString
os.system(commandString)
d = loadCsvData(dataFileName)
if isFirst:
figure(1) # title sheet
gcf().text(0.1, 0.0,
"patest_suggested_vs_streaminfo_latency\n%s\n%s\n%s\n"%(d.inputDevice,d.outputDevice,d.sampleRate))
pdfFile.savefig()
figure(2) # composite plot, includes all compositeTestFramesPerBufferValues
if isFirst:
plot( d.suggestedLatency, d.suggestedLatency, label="Suggested latency" )
plot( d.suggestedLatency, d.halfDuplexOutputLatency )
plot( d.suggestedLatency, d.halfDuplexInputLatency )
plot( d.suggestedLatency, d.fullDuplexOutputLatency )
plot( d.suggestedLatency, d.fullDuplexInputLatency )
if framesPerBuffer in individualPlotFramesPerBufferValues: # individual plots
figure( 3 + individualPlotFramesPerBufferValues.index(framesPerBuffer) )
plot( d.suggestedLatency, d.suggestedLatency, label="Suggested latency" )
plot( d.suggestedLatency, d.halfDuplexOutputLatency, label="Half-duplex output latency" )
plot( d.suggestedLatency, d.halfDuplexInputLatency, label="Half-duplex input latency" )
plot( d.suggestedLatency, d.fullDuplexOutputLatency, label="Full-duplex output latency" )
plot( d.suggestedLatency, d.fullDuplexInputLatency, label="Full-duplex input latency" )
if framesPerBuffer == 0:
framesPerBufferText = "paFramesPerBufferUnspecified"
else:
framesPerBufferText = str(framesPerBuffer)
setFigureTitleAndAxisLabels( "user frames per buffer: "+str(framesPerBufferText) )
setDisplayRangeSeconds(2.2)
pdfFile.savefig()
setDisplayRangeSeconds(0.1)
setFigureTitleAndAxisLabels( "user frames per buffer: "+str(framesPerBufferText)+" (detail)" )
pdfFile.savefig()
isFirst = False
figure(2)
setFigureTitleAndAxisLabels( "composite of frames per buffer values:\n"+str(compositeTestFramesPerBufferValues) )
setDisplayRangeSeconds(2.2)
pdfFile.savefig()
setDisplayRangeSeconds(0.1)
setFigureTitleAndAxisLabels( "composite of frames per buffer values:\n"+str(compositeTestFramesPerBufferValues)+" (detail)" )
pdfFile.savefig()
pdfFile.close()
#uncomment this to display interactively, otherwise we just output a pdf
#show()
| gpl-2.0 |
AmineEch/BrainCNN | test.py | 1 | 9359 | from __future__ import print_function, division
import matplotlib.pyplot as plt
plt.interactive(False)
import tensorflow as tf
import h5py
from scipy.stats import pearsonr
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras import optimizers, callbacks, regularizers, initializers
from E2E_conv import *
from injury import ConnectomeInjury
import numpy as np
batch_size = 14
dropout = 0.5
momentum = 0.9
lr = 0.01
decay = 0.0005
noise_weight = 0.0625
reg = regularizers.l2(decay)
kernel_init = initializers.he_uniform()
# Model architecture
model = Sequential()
model.add(E2E_conv(2,32,(2,90),kernel_regularizer=reg,input_shape=(90,90,1),input_dtype='float32',data_format="channels_last"))
print("First layer output shape :"+str(model.output_shape))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(E2E_conv(2,32,(2,90),kernel_regularizer=reg,data_format="channels_last"))
print(model.output_shape)
model.add(LeakyReLU(alpha=0.33))
model.add(Convolution2D(64,(1,90),kernel_regularizer=reg,data_format="channels_last"))
model.add(LeakyReLU(alpha=0.33))
model.add(Convolution2D(256,(90,1),kernel_regularizer=reg,data_format="channels_last"))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(128,kernel_regularizer=reg,kernel_initializer=kernel_init))
#print(model.output_shape)
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(30,kernel_regularizer=reg,kernel_initializer=kernel_init))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(2,kernel_regularizer=reg,kernel_initializer=kernel_init))
model.add(Flatten())
model.add(LeakyReLU(alpha=0.33))
model.summary()
#print(model.output_shape)
opt = optimizers.SGD(momentum=momentum,nesterov=True,lr=lr)
model.compile(optimizer=opt,loss='mean_squared_error',metrics=['mae'])
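# (Added note) The stack above follows the BrainNetCNN-style pattern: two
# edge-to-edge (E2E) blocks on the 90x90 connectome, an edge-to-node
# convolution (1x90 kernel), a node-to-graph convolution (90x1 kernel), then
# dense layers down to the two injury-strength targets.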
def get_symmetric_noise(m, n):
"""Return a random noise image of size m x n with values between 0 and 1."""
# Generate random noise image.
noise_img = np.random.rand(m, n)
# Make the noise image symmetric.
noise_img = noise_img + noise_img.T
# Normalize between 0 and 1.
noise_img = (noise_img - noise_img.min()) / (noise_img.max() - noise_img.min())
assert noise_img.max() == 1 # Make sure is between 0 and 1.
assert noise_img.min() == 0
assert (noise_img.T == noise_img).all() # Make sure symmetric.
return noise_img
def simulate_injury(X, weight_A, sig_A, weight_B, sig_B):
denom = (np.ones(X.shape) + (weight_A * sig_A)) * (np.ones(X.shape) + (weight_B * sig_B))
X_sig_AB = np.divide(X, denom)
return X_sig_AB
def apply_injury_and_noise(X, Sig_A, weight_A, Sig_B, weight_B, noise_weight):
"""Returns a symmetric, signed, noisy, adjacency matrix with simulated injury from two sources."""
X_sig_AB = simulate_injury(X, weight_A, Sig_A, weight_B, Sig_B)
# Get the noise image.
noise_img = get_symmetric_noise(X.shape[0], X.shape[1])
# Weight the noise image.
weighted_noise_img = noise_img * noise_weight
# Add the noise to the original image.
X_sig_AB_noise = X_sig_AB + weighted_noise_img
assert (X_sig_AB_noise.T == X_sig_AB_noise).all() # Make sure still is symmetric.
return X_sig_AB_noise
def generate_injury_signatures(X_mn, n_injuries, r_state):
"""Generates the signatures that represent the underlying signal in our synthetic experiments.
d : (integer) the size of the input matrix (assumes is size dxd)
"""
# Get the strongest regions, which we will apply simulated injuries
sig_indexes = [2, 50]
d = X_mn.shape[0]
S = []
# Create a signature for
for idx, sig_idx in enumerate(sig_indexes):
# Okay, let's make some signature noise vectors.
A_vec = r_state.rand((d))
# B_vec = np.random.random((n))
# Create the signature matrix.
A = np.zeros((d, d))
A[:, sig_idx] = A_vec
A[sig_idx, :] = A_vec
S.append(A)
assert (A.T == A).all() # Check if matrix is symmetric.
return np.asarray(S)
def sample_injury_strengths(n_samples, X_mn, A, B, noise_weight):
"""Returns n_samples connectomes with simulated injury from two sources."""
mult_factor = 10
n_classes = 2
# Range of values to predict.
n_start = 0.5
n_end = 1.4
# amt_increase = 0.1
# These will be our Y.
A_weights = np.random.uniform(n_start, n_end, [n_samples])
B_weights = np.random.uniform(n_start, n_end, [n_samples])
X_h5 = np.zeros((n_samples, 1, X_mn.shape[0], X_mn.shape[1]), dtype=np.float32)
Y_h5 = np.zeros((n_samples, n_classes), dtype=np.float32)
for idx in range(n_samples):
w_A = A_weights[idx]
w_B = B_weights[idx]
# Get the matrix.
X_sig = apply_injury_and_noise(X_mn, A, w_A * mult_factor, B, w_B * mult_factor, noise_weight)
# Normalize.
X_sig = (X_sig - X_sig.min()) / (X_sig.max() - X_sig.min())
# Put in h5 format.
X_h5[idx, 0, :, :] = X_sig
Y_h5[idx, :] = [w_A, w_B]
return X_h5, Y_h5
def load_base_connectome():
X_mn = scipy.io.loadmat("data/base.mat")
X_mn = X_mn['X_mn']
return X_mn
import numpy as np
import scipy.io  # scipy.io must be imported explicitly for scipy.io.loadmat below
r_state = np.random.RandomState(41)
X_mn = load_base_connectome()
S = generate_injury_signatures(X_mn=X_mn,n_injuries=2,r_state=r_state)
noise_weight = 0.125  # hypothetical value; the weight actually used is not shown in this fragment
X,Y = sample_injury_strengths(1000,X_mn,S[0],S[1],noise_weight)
print(X.shape)
print(Y.shape)
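# Note: `model` below is assumed to be a compiled Keras model (the BrainCNN used for
# visualization) defined earlier in the original notebook; its definition is not part
# of this fragment.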
X = X.reshape(X.shape[0],X.shape[3],X.shape[2],X.shape[1])
model.fit(X,Y,nb_epoch=1000,verbose=1)
model.save_weights("Weights/BrainCNNWeights_Visualization.h5") | mit |
ramansbach/cluster_analysis | clustering/scripts/corrdim_timing_het.py | 1 | 2601 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 08:18:15 2017
@author: rachael
Compute the correlation integral over the COMs of peptides.
"""
from __future__ import absolute_import, division, print_function
from time import time
import clustering as cl
import gsd.hoomd
import os.path as op
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# compute the correlation integral at a range of frames (tstart to tmax) for each run
import pdb
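# SSS below (and AAA, SCSCSC, BBBB further down) appear to be template placeholders
# that a driver script substitutes with the save path and interaction-strength labels
# before this script is run.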
save_path=SSS
data_path=save_path
# matplotlib setup
plt.ioff()
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
runs = 5
ats = {'contact':17,'optical':12}
#molno = 4
molnolabel = 10000
AAdlabel = AAA
SCdlabel = SCSCSC
BBdlabel = BBBB
dt = 1.0
emax = 294 #maximum length scale to compute correlation integral on
estep = 0.147 #distance steps to compute correlation integral at
tstart = 10 #timestep where to begin
tmax = 999 #final timestep at which to compute correlation integral
tskip = 100 #compute correlation integral at every 100 timesteps
combeadtypes = ['EA','EB']
markers = ['o','x','^','v','s']
fbase = 'mols'+str(molnolabel)+'_' + str(AAdlabel)+'-02-'\
+str(SCdlabel)+'-150-'+str(BBdlabel)+'_small_run'
framets = range(tstart,tmax,tskip)
fnames = []
for i in range(runs):
fname = op.join(data_path,fbase + str(i+1) + '.gsd')
fnames.append(fname)
start = time()
cemats = np.zeros([int(emax/estep),1+runs])
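# cemats layout: column 0 holds the epsilon values, columns 1..runs hold C(epsilon)
# for each run (filled in below via cemats[:,0] and cemats[:,runi+1]).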
corrfig = plt.figure()
corrax = corrfig.add_subplot(111)
for t in framets:
for runi in range(runs):
#pdb.set_trace()
traj = gsd.hoomd.open(fnames[runi])
finalFrame = traj[t]
ind = []
for combeadtype in combeadtypes:
tind = finalFrame.particles.types.index(combeadtype)
ind += list(np.where(finalFrame.particles.typeid==tind)[0])
comlist = finalFrame.particles.position[ind]
cemat = cl.corrcalc(comlist,emax,estep)
corrax.plot(np.log(cemat[0,:]),np.log(cemat[1,:]),markers[runi])
cemats[:,0] = cemat[0,:]
cemats[:,runi+1] = cemat[1,:]
corrax.grid('on')
corrax.set_xlabel(r'$\log(\epsilon/\epsilon_0)$ $(d^*)$')
corrax.set_ylabel(r'$ \log(C(\epsilon))$')
corrfig.savefig(op.join(save_path,fbase+'-corrcalc'+str(t)),
bbox_inches='tight')
corrfi = open(op.join(save_path,fbase+'-corrcalc'+str(t)+'.dat'),'w')
for e in range(np.shape(cemats)[0]):
for runi in range(np.shape(cemats)[1]):
corrfi.write('{0} '.format(cemats[e,runi]))
corrfi.write('\n')
corrfi.close()
end = time()
print("Time to compute correlation integral: ",end-start)
| mit |
ltiao/scikit-learn | sklearn/decomposition/tests/test_pca.py | 21 | 11810 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
    n_features = n_components + 2  # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
if hasattr(pca, 'random_state'):
pca.random_state = rng
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=4)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
    # Check that PCA and RandomizedPCA report consistent explained variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    # Test that RandomizedPCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
jacobmarks/QTop | src/rg_thresh.py | 1 | 1246 | #
# QTop
#
# Copyright (c) 2016 Jacob Marks ([email protected])
#
# This file is part of QTop.
#
# QTop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
L = np.array([2,3,8,13,25,50])
thresh = np.array([.12,.157,.204,.213,.223,.227])
plt.plot(L, thresh, '.', label="Empirical Data")
# plt.plot(L, thresh)
def func(x, a, b, c):
return a - float(b)/(c + x)
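# As x grows, func(x) approaches a, so popt[0] from the fit below is the plateau
# value reported as the asymptotic threshold.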
# def func(x, a, b, c):
# return a * np.exp(-b * x) + c
# def func(x, a, b, c):
# return float(a) /(1 + np.exp(-b* (x - c)))
# def func(x, a, b, c):
# return a * ( 1 - np.exp(-b * x)) + c
xs = np.linspace(2,60,100)
popt, pcov = curve_fit(func, L, thresh)
plt.plot(xs, func(xs, *popt), label="Fitted Curve")
ys = [popt[0]] * 100
thr = round(popt[0],3)
plt.plot(xs, ys, 'r--', label="Plateau at " + str(thr))
title = "Threshold vs Qudit dimension"
plt.title(str(title))
plt.xlabel("Qudit dimension d")
plt.ylabel("Threshold")
plt.legend(loc=4)
plt.savefig('../plots/rg_thresh.png')
plt.show()
| gpl-3.0 |
khkaminska/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut to the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: if we take it only weakly
# dependent on the gradient, the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
ben-hopps/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
aemerick/galaxy_analysis | method_paper_plots/metal_retention.py | 1 | 2439 | from galaxy_analysis.plot.plot_styles import *
from galaxy_analysis.utilities import utilities
#----------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import glob as glob
import deepdish as dd
TMAX = 500.0
line_width = 3.0
# would be nice to start making gather functions
# for all of these plot functions to not have to
# do any more looping over ALL data sets to gather
# wdir = '/mnt/ceph/users/emerick/enzo_runs/pleiades/starIC/run11_30km/'
def plot_metal_retention(workdir = './', outdir = './'):
labels = {'Halo' : 'CGM' , 'Disk' : 'Disk', 'Outside Halo' : 'Outside Halo'}
lstyle = {'Halo' : '-', 'Disk' : '--', 'Outside Halo' : ':'}
gather_keys = {'Disk' : ['gas_meta_data', 'masses', 'Disk', 'Total Tracked Metals'],
'Halo' : ['gas_meta_data', 'masses', 'Halo', 'Total Tracked Metals'],
'FB' : ['gas_meta_data', 'masses', 'FullBox', 'Total Tracked Metals'],
'Outside Box' : ['gas_meta_data', 'masses', 'OutsideBox', 'Total Tracked Metals']}
all_data = {}
data_list, times = utilities.select_data_by_time(dir = workdir,
tmin=0.0,tmax= 650.0)
all_data['times'] = times
for k in gather_keys.keys():
all_data[k] = utilities.extract_nested_dict_asarray(None, gather_keys[k], data_list, False)
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
total = all_data['FB'] + all_data['Outside Box']
disk_frac = all_data['Disk'] / total
halo_frac = all_data['Halo'] / total
outside_halo_frac = (all_data['FB'] - all_data['Halo'] - all_data['Disk'] + all_data['Outside Box']) / total
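    # i.e. metals tracked in the full box (plus any that left the box) minus what sits
    # in the disk and the CGM/halo; this is the complement of the two fractions above.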
t = all_data['times'] - all_data['times'][0]
ax.plot(t, halo_frac, lw = line_width, ls = lstyle['Halo'], color = 'black', label = labels['Halo'])
ax.plot(t, disk_frac, lw = line_width, ls = lstyle['Disk'], color = 'black', label = labels['Disk'])
ax.plot(t, outside_halo_frac, lw = line_width, ls = lstyle['Outside Halo'], color = 'black', label = labels['Outside Halo'])
ax.set_xlabel(r'Time (Myr)')
ax.set_ylabel(r'Fraction of Metals')
ax.set_xlim(0.0, TMAX)
ax.set_ylim(0.0, 1.0)
ax.legend(loc = 'best')
plt.minorticks_on()
plt.tight_layout()
fig.savefig(outdir + 'metal_retention.png')
plt.close()
return
if __name__ == "__main__":
plot_metal_retention()
| mit |
blab/stability | augur/src/H1N1pdm_process.py | 1 | 17813 | import matplotlib as mpl
mpl.use('pdf')
import time, re, os
from virus_filter import flu_filter, fix_name
from virus_clean import virus_clean
from tree_refine import tree_refine
from tree_titer import HI_tree
from fitness_model import fitness_model
from H3N2_process import H3N2_refine as H1N1pdm_refine
from process import process, virus_config
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Align import MultipleSeqAlignment
import numpy as np
from itertools import izip
# HA2 AA sites are shifted by +327 relative to HA1
# So HA2:174E is 501E in HA1 numbering
# numbering starting at methionine including the signal peptide
sp = 17
epitope_mask = np.array(['1' if pos in [141,142,145,146,172,176,178,179,180,181,183,184,185, #Sa
170,173,174,177,206,207,210,211,212,214,216, #Sb
183,187,191,196,221,225,254,258,288, #Ca1
154,157,158,159,161,163,238,239,242,243, #Ca2
87, 88, 90, 91, 92, 95, 96, 98, 99, 100, 132, 139 #Cb
]
else '0' for pos in xrange(1,1725)])
receptor_binding_sites = [x-1 for x in [159,169,170,172,173,203,207]]
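# Indexing note: epitope_mask[i] corresponds to HA position i+1 in the numbering above
# (e.g. epitope_mask[140] == '1' marks Sa site 141), while receptor_binding_sites
# already stores 0-based indices (hence the x-1 shift).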
virus_config.update({
# data source and sequence parsing/cleaning/processing
'virus':'H1N1pdm',
'alignment_file':'data/H1N1pdm_gisaid_epiflu_sequence.fasta',
'outgroup':'A/Swine/Indiana/P12439/00',
'force_include':'data/H1N1pdm_HI_strains.txt',
'force_include_all':False,
'date_spec':'year',
'max_global':True, # sample as evenly as possible from different geographic regions
    'cds':[0,None], # define the HA start in 0 numbering
# define relevant clades in canonical HA1 numbering (+1)
# numbering starting at methionine including the signal peptide
'clade_designations': {
'2': [('HA1', 125, 'N'), ('HA1', 134 ,'A'), ('HA1', 183, 'S'), ('HA1', 31,'D'), ('HA1', 172,'N'), ('HA1', 186,'T')],
'3': [('HA1', 134 ,'T'), ('HA1', 183, 'P')],
'4': [('HA1', 125, 'D'), ('HA1', 134 ,'A'), ('HA1', 183, 'S')],
'5': [('HA1', 87, 'N'), ('HA1', 205, 'K'), ('HA1', 216, 'V'), ('HA1', 149, 'L')],
'6': [('HA1', 185,'T'), ('HA1', 97, 'N'), ('HA1', 197, 'A')],
'6c':[('HA1', 234,'I'), ('HA1', 97, 'N'), ('HA1', 197, 'A'), ('HA1', 283,'E')],
'6b':[('HA1', 163,'Q'), ('HA1', 256, 'T'), ('HA1', 197, 'A'), ('HA1', 283,'E')],
'7': [('HA1', 143,'G'), ('HA1', 97, 'D'), ('HA1', 197, 'T')],
'8': [('HA1', 186,'T'), ('HA1', 272,'A')],
'6b.1':[('HA1', 163,'Q'), ('HA1', 256, 'T'), ('HA1', 197, 'A'), ('HA1', 283, 'E'), ('SigPep', 13, 'T'), ('HA1', 84, 'N'), ('HA1', 162, 'N')],
'6b.2':[('HA1', 163,'Q'), ('HA1', 256, 'T'), ('HA1', 197, 'A'), ('HA1', 283, 'E'), ('HA2', 164, 'G'), ('HA1', 152, 'T'), ('HA2', 174, 'E')]
},
'HI_fname':'data/H1N1pdm_HI_titers.txt',
'html_vars': {'coloring': 'ep, ne, rb, lbi, dfreq, region, date, cHI, HI_dist',
'gtplaceholder': 'HA1 positions...',
'freqdefault': '6b, 6c'},
'js_vars': {'LBItau': 0.0005, 'LBItime_window': 0.5, 'dfreq_dn':2},
'layout':'auspice',
})
class H1N1pdm_filter(flu_filter):
def __init__(self,min_length = 987, **kwargs):
'''
parameters
min_length -- minimal length for a sequence to be acceptable
'''
flu_filter.__init__(self, **kwargs)
self.min_length = min_length
self.vaccine_strains =[{
'strain':'A/California/07/2009',
'isolate_id':'EPI_ISL_31553',
'date':'2009-04-09',
'lab':'Naval Health Research Center',
'country':'USA',
'region':'NorthAmerica',
'seq':'ATGAAGGCAATACTAGTAGTTCTGCTATATACATTTGCAACCGCAAATGCAGACACATTATGTATAGGTTATCATGCGAACAATTCAACAGACACTGTAGACACAGTACTAGAAAAGAATGTAACAGTAACACACTCTGTTAACCTTCTAGAAGACAAGCATAACGGGAAACTATGCAAACTAAGAGGGGTAGCCCCATTGCATTTGGGTAAATGTAACATTGCTGGCTGGATCCTGGGAAATCCAGAGTGTGAATCACTCTCCACAGCAAGCTCATGGTCCTACATTGTGGAAACACCTAGTTCAGACAATGGAACGTGTTACCCAGGAGATTTCATCGATTATGAGGAGCTAAGAGAGCAATTGAGCTCAGTGTCATCATTTGAAAGGTTTGAGATATTCCCCAAGACAAGTTCATGGCCCAATCATGACTCGAACAAAGGTGTAACGGCAGCATGTCCTCATGCTGGAGCAAAAAGCTTCTACAAAAATTTAATATGGCTAGTTAAAAAAGGAAATTCATACCCAAAGCTCAGCAAATCCTACATTAATGATAAAGGGAAAGAAGTCCTCGTGCTATGGGGCATTCACCATCCATCTACTAGTGCTGACCAACAAAGTCTCTATCAGAATGCAGATGCATATGTTTTTGTGGGGTCATCAAGATACAGCAAGAAGTTCAAGCCGGAAATAGCAATAAGACCCAAAGTGAGGGATCAAGAAGGGAGAATGAACTATTACTGGACACTAGTAGAGCCGGGAGACAAAATAACATTCGAAGCAACTGGAAATCTAGTGGTACCGAGATATGCATTCGCAATGGAAAGAAATGCTGGATCTGGTATTATCATTTCAGATACACCAGTCCACGATTGCAATACAACTTGTCAAACACCCAAGGGTGCTATAAACACCAGCCTCCCATTTCAGAATATACATCCGATCACAATTGGAAAATGTCCAAAATATGTAAAAAGCACAAAATTGAGACTGGCCACAGGATTGAGGAATATCCCGTCTATTCAATCTAGAGGCCTATTTGGGGCCATTGCCGGTTTCATTGAAGGGGGGTGGACAGGGATGGTAGATGGATGGTACGGTTATCACCATCAAAATGAGCAGGGGTCAGGATATGCAGCCGACCTGAAGAGCACACAGAATGCCATTGACGAGATTACTAACAAAGTAAATTCTGTTATTGAAAAGATGAATACACAGTTCACAGCAGTAGGTAAAGAGTTCAACCACCTGGAAAAAAGAATAGAGAATTTAAATAAAAAAGTTGATGATGGTTTCCTGGACATTTGGACTTACAATGCCGAACTGTTGGTTCTATTGGAAAATGAAAGAACTTTGGACTACCACGATTCAAATGTGAAGAACTTATATGAAAAGGTAAGAAGCCAGCTAAAAAACAATGCCAAGGAAATTGGAAACGGCTGCTTTGAATTTTACCACAAATGCGATAACACGTGCATGGAAAGTGTCAAAAATGGGACTTATGACTACCCAAAATACTCAGAGGAAGCAAAATTAAACAGAGAAGAAATAGATGGGGTAAAGCTGGAATCAACAAGGATTTACCAGATTTTGGCGATCTATTCAACTGTCGCCAGTTCATTGGTACTGGTAGTCTCCCTGGGGGCAATCAGTTTCTGGATGTGCTCTAATGGGTCTCTACAGTGTAGAATATGTATTTAA',
}]
tmp_outgroup = SeqIO.read('source-data/H1N1pdm_outgroup.gb', 'genbank')
genome_annotation = tmp_outgroup.features
self.cds = {x.qualifiers['gene'][0]:x for x in genome_annotation
if 'gene' in x.qualifiers and x.type=='CDS' and
x.qualifiers['gene'][0] in ['SigPep', 'HA1', 'HA2']}
self.outgroup = {
'strain': 'A/Swine/Indiana/P12439/00',
'db': 'IRD',
'accession': 'AF455680',
'date': '2002-03-14',
'country': 'USA',
'region': 'NorthAmerica',
'seq': str(tmp_outgroup.seq).upper()
}
class H1N1pdm_clean(virus_clean):
def __init__(self,**kwargs):
virus_clean.__init__(self, **kwargs)
def clean_outbreaks(self):
"""Remove duplicate strains, where the geographic location, date of sampling and sequence are identical"""
virus_hashes = set()
new_viruses = []
for v in self.viruses:
geo = re.search(r'A/([^/]+)/', v.strain).group(1)
if geo:
vhash = (geo, v.date, str(v.seq))
if vhash not in virus_hashes:
new_viruses.append(v)
virus_hashes.add(vhash)
self.viruses = MultipleSeqAlignment(new_viruses)
return new_viruses
def clean_outliers(self):
from seq_util import hamming_distance as distance
"""Remove outlier viruses"""
remove_viruses = []
outlier_seqs = [
"ATGAAAGCAATACTAGTAGTCCTGCTATATACATTTACAACCGCAAATGCCGACACATTATGTATAGGTTATCATGCAAACAATTCAACTGACACCGTAGACACAGTACTAGAAAAGAATGTAACAGTAACACACTCTGTCAACCTTCTAGAAAACAGGCATAATGGGAAACTATGTAAACTAAGAGGGGTAGCTCCATTGCATTTGGGTAAATGTAACATTGCTGGCTGGCTTCTGGGAAATCCAGAGTGTGAATCACTCTCCACAGCAAGCTCATGGTCCTACATTGTGGAAACATCTAATTCAGACAATGGGACGTGTTACCCAGGAGATTTCATCAATTATGAGGAGCTAAGAGAGCAGTTGAGCTCAGTGTCATCATTTGAAAGATTTGAGATATTCCCCAAGACAAGTTCATGGCCCAATCATGACACGAACAGAGGTGTGACGGCAGCATGTCCTCATGCTGGGGCAAACAGCTTCTACAGAAATTTAGTATGGCTAGTAAAAAAGGGAAATTCATACCCAAAGATCAACAAATCCTACATTAACAATAAAGAGAAGGAAGTTCTCGTGCTATGGGCCATTCACCATCCATCTACCAGTGCCGACCAACAAAGTCTCTACCAAAATGCAGATGCCTATGTGTTTGTGGGGTCATCAAGATACAGCAGGAAGTTCGAGCCAGAAATAGCAACAAGACCTAAGGTGAGAGACCAAGCAGGGAGAATGAACTATTACTGGACACTAGTAGAGCCTGGTGACAAGATAACATTCGAAGCAACTGGAAATCTAGTGGCACCGAGATATGCCTTCGCATTGAAAAGAAATTCTGGATCTGGTATTATCATTTCAGATACATCAGTCCACGATTGTGATACGACTTGTCAGACACCCAATGGTGCTATAAACACCAGCCTCCCATTTCAAAATATACATCCAGTCACAATTGGAGAATGTCCAAAATATGTAAAAAGTACTAAACTGAGAATGGCCACAGGTTTAAGGAATATCCCGTCTATCCAATCTAGAGGCCTGTTTGGTGCCATTGCTGGCTTTATCGAAGGGGGTTGGACAGGAATGATAGATGGATGGTACGGTTATCACCATCAAAATGAGCAGGGATCAGGATATGCAGCCGACCTGAAGAGCACACAGAATGCCATTGACGGGATCACTAACAAGGTAAACTCTGTTATTGAAAAGATGAACACACAATTCACGGCAGTAGGTAAAGAGTTCAGCCACTTGGAAAGAAGAATAGAGAATTTAAATAAAAAAGTAGATGATGGTTTTCTAGATATTTGGACTTACAATGCCGAACTATTGGTTCTATTGGAAAATGAAAGAACTTTGGATTACCACGACTCAAATGTGAAAAACTTGTATGAAAAAGTAAGAAGCCAACTAAAAAACAATGCCAAGGAAATTGGAAATGGCTGCTTTGAATTTTACCACAAATGTGATGACATGTGCATGGAAAGCGTCAAAAATGGAACTTATGATTACCCTAAATACTCAGAGGAAGCAAAACTAAACAGAGAAGAAATAGATGGGGTAAAGTTGGAATCAACAAGGATTTACCAAATTTTGGCTATCTATTCAACGGTCGCCAGTTCATTGGTACTGGTAGTCTCCCTGGGGGCAATCAGTTTCTGGATGTGCTCTAATGGGTCGCTACAGTGCAGAATATGTATTTAA",
"----------------------TGATATATACATTTACAACCGCAAATGCAGACACATTATGTATAGGTTATCATGCGAACAACTCAACTGACACCGTAGACACAGTACTAGAAAAGAATGTAACAGTAACACACTCTGTTAACCTTCTAGAAGACAGGCATAATGGGAAACTATGTAAACTAAGAGGGGTAGCTCCATTGCATTTGGGTAAATGTAACATTGCTGGCTGGCTCCTGGGAAATCCAGAGTGTGAATCACTCTTCACAGCAAGCTCATGGTCCTACATTGTGGAAACATCTAATTCAGACAATGGGACGTGTTACCCAGGAGATTTCATCAATTATGAGGAGCTAAGAGAGCAGTTGAGCTCAGTGTCATCATTTGAAAGATTTGAGATATTCCCCAAGACAAGTTCATGGCCCAATCATGACACGAACAGAGGTGTGACGGCGGCATGCCCTCATGCTGGAACAAATAGCTTCTACAGAAATTTAATATGGCTGGTCAAAAAAGGAAATTCATACCCAAAGATCAGCAAATCCTACATTAACAATAAGGAGAAGGAAGTTCTCGTGCTATGGGGCATTCACCATCCATCTACCAGTGCCGACCAACAAAGTCTCTATCAGAATGCAGATGCCTATGTTTTTGTGGGGTCATCAAGATACAGCAGGAAGTTCGAGCCAGAAATAGCAACAAGACCCAAGGTGAGGGACCAAGCAGGGAGAATGAACTATTACTGGACACTAGTAGAGCCTGGAGACAAAATAACATTCGAAGCAACTGGAAATCTAGTGGCACCGAGATATGCCTTCGCATTGAAAAGAAATTCTGGATCTGGTATTATCATTTCAGATACACCAATCCACGATTGTAATACGACTTGTCAGACACCCAAGGGTGCTATAAACACCAGCCTCCCATTTCAAAATATACATCCAGTCACAATTGGAGAATGTCCAAAGTATGTAAAAAGCACAAAATTGAGAATGGCCACAGGATTAAGGAATATCCCGTCTATTCAATCTAGGGGCCTGTTTGGGGCCATTGCCGGCTTTATTGAGGGGGGATGGACAGGAATGATAGATGGATGGTACGGTTATCACCATCAAAATGAGCAGGGATCAGGATATGCAGCAGACCTGAAGAGCACACAGAATGCCATTGACGGGATCACTAACAAGGTAAATTCTGTTATTGAAAAGATGAACACACAATTCACAGCAGTAGGTACAGAGTTCAGCCACTTGGAAAAAAGAATAGAGAATTTAAATAAGAAGGTTGATGATGGTTTTCTGGATATTTGGACTTACAATGCCGAACTGTTGGTTCTGTTGGAAAATGAAAGAACTTTGGATTACCACGACTCAAATGTGAAAACCTTATATGAAAAGGTGAGAAGCCAACTAAGAAACAATGCCAAGGAAATTGGAAATGGCTGCTTTGAATTTTACCACAAATGTGATGACACGTGCATGGAAAGCGTCAGAAATGGGACTTATGATTACCCAAAATACTCAGAAGAAGCAAAACTAAACAGAGAGGAAATAGATGGGGTAAAGCTGGAATCAACAAGGATTTTCCAAATTTTGGCGATCTATTCAACTGCCGCCAGTTCATTGGTACTGGTAGTCTCCCTGGGGGCAATCAGTTTCTGGATGTGCTCTAATGGGTCTCTACAGTGCAGAATATGTATTTAA",
"ATGAAGGCAATACTAATAGTCCTGCTATATACATTTACAACCGCAAATGCCGACAAAATATGTATAGGTTATCATGCGAACAATTCAACTGACACCGTAGACACAGTACTAGAAAAGAATGTAACAGTAACACACTCTGTCAACCTTCTAGAAAACAAGCATAATGGAAAACTATGTAAACTAAGAGGGGTAGCTCCATTGCATTTGGGTAAATGTAACATTGCTGGCTGGCTCCTGGGAAATCCAGAGTGTGAATCACTCGCCACAGCAAGCTCATGGTCCTACATTGTTGAAACTTCTAGTTCGAACAATGGGACGTGTTACCCAGGAGATTTCATCAATTATGAAGAGCTAAGAGAACAGTTAAGCTCAGTGTCATCATTTGAAAAATTTGAGATATTCCCCAAGACGAGTTCATGGCCCAATCATGAAACAAACAAAGGTGTAACGGCAGCATGTCCACATGCTGGGACAAACAGCTTCTACAAAAATTTAATATGGCTGGTCAAAAAAGAGAATTCATACCCAAAGATCAACATATCCTACACTAACAATAGAGGGAAGGAAGTTCTCGTGTTATGGGCCATTCACCATCCACCTACCAGCACCGATCAACAAAGTCTCTACCAAAATGCAAATTCCTATGTTTTTGTGGGGTCATCAAGATACAGCAGGAAGTTCGAGCCAGAAATAGCAACAAGACCCAAGGTGAGGGGCCAAGCAGGGAGAATGAACTATTACTGGACATTAGTAGAGCCTGGAGACAAGATAACATTCGAAGCAACTGGAAATTTGGTGGTACCGAGATATGCCTTCGCATTGAAAAGAAATTCTGGATCTGGTATTATCATTTCAGAGACACCAGTCCACGATTGTGATACGACTTGTCAGACACCCAATGGTGCTATTAACACCAGCCTCCCATTTCAGAATATACATCCAGTCACAATTGGGGAATGCCCAAAATATGTAAAAAGTACTAAATTGAGAATGGCCACAGGATTGAGGAACATCCCGTCCATTCAATCTAGAGGCCTGTTTGGGGCCATTGCCGGCTTTATTGAAGGGGGCTGGACAGGAATGATAGATGGGTGGTACGGTTATCACCATCAAAATGAGCAAGGATCAGGATATGCAGCCGACCTGAAGAGCACACAGAATGCCATTGACGGGATCACTAATAAGGTAAATTCTGTTATTGAAAAGATGAATACACAATTCACAGCAGTAGGTAAAGAGTTCAGCCACTTGGAAAGAAGAATAGAGAATTTAAATAAAAAGGTTGATGATGGGTTTATAGATATTTGGACTTACAATGCCGAACTGTTGGTTCTGTTGGAAAATGAAAGAACTTTGGATTACCACGACTCAAATGTGAAAACCTTATATGAAAAAGTAAGAAGCCAACTAAAAAACAATGCCAAGGAAATTGGAAACGGCTGCTTTGAATTTTACCACAAATGTGATGACACGTGCATGGAGAGCGTCAAAAATGGAACTTATGATTACCCAAAATACTCAGAGGAAGCAAAACTAAACAGAGAGGAAATAGATGGGATAAAGTTGGAATCAACAAGGATTTACCAAATTTTGGCGATCTATTCAACTGTCGCCAGTTCATTGGTACTGG-----------------------------------------------------------------------"
]
for outlier_seq in outlier_seqs:
for v in self.viruses:
dist = distance(Seq(outlier_seq), v)
if (dist < 0.02):
remove_viruses.append(v)
if self.verbose>1:
print "\tremoving", v.strain
self.viruses = MultipleSeqAlignment([v for v in self.viruses if v not in remove_viruses])
def clean_outlier_strains(self):
"""Remove single outlying viruses"""
remove_viruses = []
outlier_strains = ["A/Kenya/264/2012", "A/Iowa/39/2015", "A/Asturias/RR6898/2010", "A/Wisconsin/28/2011", "A/Brest/1161/2014", "A/Tomsk/273-MA1/2010", "A/Minnesota/46/2015", "A/Poland/16/2013", "A/Hungary/02/2013", "A/Hungary/16/2013", "A/California/07/2009NYMC-X18113/198", "A/Christchurch/16/2010NIB-74xp13/202"]
for outlier_strain in outlier_strains:
for v in self.viruses:
if (v.strain == outlier_strain):
remove_viruses.append(v)
if self.verbose > 1:
print "\tremoving", v.strain
self.viruses = MultipleSeqAlignment([v for v in self.viruses if v not in remove_viruses])
def clean(self):
self.clean_generic()
self.clean_outbreaks()
print "Number of viruses after outbreak filtering:",len(self.viruses)
self.clean_outliers()
self.clean_outlier_strains()
print "Number of viruses after outlier filtering:",len(self.viruses)
class H1N1pdm_process(process, H1N1pdm_filter, H1N1pdm_clean, H1N1pdm_refine, HI_tree, fitness_model):
"""docstring for H1N1pdm_process, H1N1pdm_filter"""
def __init__(self,verbose = 0, force_include = None,
force_include_all = False, max_global= True, **kwargs):
self.force_include = force_include
self.force_include_all = force_include_all
self.max_global = max_global
process.__init__(self, **kwargs)
H1N1pdm_filter.__init__(self,**kwargs)
H1N1pdm_clean.__init__(self,**kwargs)
H1N1pdm_refine.__init__(self,**kwargs)
HI_tree.__init__(self,**kwargs)
fitness_model.__init__(self,**kwargs)
self.verbose = verbose
def run(self, steps, viruses_per_month=50, raxml_time_limit=1.0, lam_HI=2.0, lam_pot=0.3, lam_avi=2.0):
if 'filter' in steps:
print "--- Virus filtering at " + time.strftime("%H:%M:%S") + " ---"
self.filter()
if self.force_include is not None and os.path.isfile(self.force_include):
with open(self.force_include) as infile:
forced_strains = [fix_name(line.strip().split('\t')[0]).upper() for line in infile]
else:
forced_strains = []
self.subsample(viruses_per_month,
prioritize=forced_strains, all_priority=self.force_include_all,
region_specific = self.max_global)
self.add_older_vaccine_viruses(dt = 6)
self.dump()
else:
self.load()
if 'align' in steps:
self.align() # -> self.viruses is an alignment object
if 'clean' in steps:
print "--- Clean at " + time.strftime("%H:%M:%S") + " ---"
self.clean() # -> every node as a numerical date
self.dump()
if 'tree' in steps:
print "--- Tree infer at " + time.strftime("%H:%M:%S") + " ---"
self.infer_tree(raxml_time_limit) # -> self has a tree
self.dump()
if 'ancestral' in steps:
print "--- Infer ancestral sequences " + time.strftime("%H:%M:%S") + " ---"
self.infer_ancestral() # -> every node has a sequence
self.dump()
if 'refine' in steps:
print "--- Tree refine at " + time.strftime("%H:%M:%S") + " ---"
self.refine()
self.dump()
if 'frequencies' in steps:
print "--- Estimating frequencies at " + time.strftime("%H:%M:%S") + " ---"
self.determine_variable_positions()
self.estimate_frequencies(tasks = ["mutations","tree"])
if 'genotype_frequencies' in steps:
self.estimate_frequencies(tasks = ["genotypes"])
self.dump()
if 'HI' in steps:
print "--- Adding HI titers to the tree " + time.strftime("%H:%M:%S") + " ---"
try:
self.determine_variable_positions()
self.map_HI(training_fraction=1.0, method = 'nnl1reg',
lam_HI=lam_HI, lam_avi=lam_avi, lam_pot=lam_pot, map_to_tree=True)
self.map_HI(training_fraction=1.0, method = 'nnl1reg', force_redo=True,
lam_HI=lam_HI, lam_avi=lam_avi, lam_pot=lam_pot, map_to_tree=False)
self.dump()
except:
print("HI modeling failed!")
if 'export' in steps:
self.add_titers()
self.temporal_regional_statistics()
# exporting to json, including the H1N1pdm specific fields
self.export_to_auspice(tree_fields = [
'ep', 'ne', 'rb', 'aa_muts','accession','isolate_id', 'lab','db', 'country',
'dHI', 'cHI', 'mean_HI_titers','HI_titers','HI_titers_raw', 'serum', 'HI_info',
'avidity_tree','avidity_mut', 'potency_mut', 'potency_tree', 'mean_potency_mut', 'mean_potency_tree', 'autologous_titers'],
annotations = ['5', '6', '6b', '6c', '7', '6b.1', '6b.2'])
if params.html:
self.generate_indexHTML()
self.export_HI_mutation_effects()
if 'HIvalidate' in steps:
print "--- generating validation figures " + time.strftime("%H:%M:%S") + " ---"
self.generate_validation_figures()
if __name__=="__main__":
all_steps = ['filter', 'align', 'clean', 'tree', 'ancestral', 'refine',
'frequencies', 'HI', 'export'] + ['HIvalidate']
from process import parser
params = parser.parse_args()
lt = time.localtime()
num_date = round(lt.tm_year+(lt.tm_yday-1.0)/365.0,2)
params.time_interval = (num_date-params.years_back, num_date)
if params.interval is not None and len(params.interval)==2 and params.interval[0]<params.interval[1]:
params.time_interval = (params.interval[0], params.interval[1])
dt= params.time_interval[1]-params.time_interval[0]
params.pivots_per_year = 12.0 if dt<5 else 6.0 if dt<10 else 3.0
steps = all_steps[all_steps.index(params.start):(all_steps.index(params.stop)+1)]
if params.skip is not None:
for tmp_step in params.skip:
if tmp_step in steps:
print "skipping",tmp_step
steps.remove(tmp_step)
# add all arguments to virus_config (possibly overriding)
virus_config.update(params.__dict__)
# pass all these arguments to the processor: will be passed down as kwargs through all classes
myH1N1pdm = H1N1pdm_process(**virus_config)
if params.test:
myH1N1pdm.load()
else:
myH1N1pdm.run(steps, viruses_per_month = virus_config['viruses_per_month'],
raxml_time_limit = virus_config['raxml_time_limit'],
lam_HI = virus_config['lam_HI'],
lam_avi = virus_config['lam_avi'],
lam_pot = virus_config['lam_pot'],
)
| agpl-3.0 |
eclee25/flu-SDI-simulations-age | age_time_T-age_epitime_viz.py | 1 | 12844 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 2/23/14
###Purpose: visualize results of time-based epidemic simulations when aligned by epidemic time, which is defined as aligning tsteps at which simulation attained 5% of cumulative infections during the epidemic
#### pairs with age_time_T-age.py
###Import data:
###Command Line: python age_time_T-age_epitime_viz.py
##############################################
####### notes #######
### codebook of age class codes
# '1' - Toddlers: 0-2
# '2' - Preschool: 3-4
# '3' - Children: 5-18
# '4' - Adults: 19-64
# '5' - Seniors: 65+ (community)
# '6' - Elders: 65+ (nursing home)
# There are only 94 "elders" in the Vancouver network, and they all reside in one nursing home, so they can be combined with the seniors for analysis purposes (all_elderly).
### packages/modules ###
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
import zipfile
from time import clock
import bisect
## local modules ##
import percolations as perc
import pretty_print as pp
### plotting settings ###
colorvec = ['black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink']
### data processing parameters ###
align_prop = 0.05
### simulation parameters ###
numsims = 800 # number of simulations
size_epi = 515 # threshold value that designates an epidemic in the network (5% of network)
# gamma = probability of recovery at each time step
# on avg, assume 5 days till recovery
gamma = 1/float(5) # 5 days recovery here
T = 0.0643 # total epidemic size = 20%
# T = 0.075 # total epidemic size = 30%
# T = beta / (beta + gamma)
# when T = 0.0643 and gamma = 1/5, b = 0.0137
# when T = 0.075 and gamma = 1/5, b = 0.0162
b = (-T * gamma)/(T - 1)
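# Illustrative derivation of the line above (not part of the original analysis):
# solving T = b / (b + gamma) for b gives b = -T*gamma / (T - 1) = T*gamma / (1 - T),
# so T = 0.0643, gamma = 0.2 gives b ~= 0.0137 and T = 0.075 gives b ~= 0.0162,
# matching the values quoted in the comments above.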
# define different child transmissibility multipliers
# Cauchemez 2004 cites that household risk when there is a child infected vs when there is an adult infected is 1.85 times greater (0.48/0.26)
m1, m2 = 1, 2
Tmult_list = np.linspace(m1, m2, num=11, endpoint=True)
### data structures ###
# d_node_age[nodenumber] = ageclass
d_node_age = {}
### ziparchive to read and write results ###
zipname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Results/adultT-age_time_%ssims_beta%.3f_Tmult%.1f-%.1f_vax0.zip' %(numsims, b, m1, m2)
#############################################
# age data processing
graph_ages = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Data/urban_ages_Sarah.csv') # node number and age class
for line in graph_ages:
new_line = line.split()
for line in new_line:
node, age = line.split(',')
d_node_age[node] = age # node-ageclass dictionary
# define network size
N = len(d_node_age)
# create binary lists to indicate children and adults
ch = [1 if d_node_age[str(node)] == '3' else 0 for node in xrange(1, int(N) + 1)]
ad = [1 if d_node_age[str(node)] == '4' else 0 for node in xrange(1, int(N) + 1)]
# child and adult population sizes
chsz = float(sum(ch))
adsz = float(sum(ad))
# high risk groups: toddlers (0-2), seniors & elderly (65+)
to = [1 if d_node_age[str(node)] == '1' else 0 for node in xrange(1, int(N) + 1)]
sr = [1 if d_node_age[str(node)] == '5' or d_node_age[str(node)] == '6' else 0 for node in xrange(1, int(N) + 1)]
tosz = float(sum(to))
srsz = float(sum(sr))
print 'children, adults, toddlers, seniors', chsz, adsz, tosz, srsz
##############################################
# data processing - convert tstep info into dictionaries
# storage dictionaries need to be declared outside the loop
# dict_epiincid[(m, simnumber, 'T', 'C' or 'A')] = [T, C or A incid at tstep 0, T, C or A incid at tstep 1...], where incidence is simply number of new cases (raw)
# dict_epiAR[(m, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per population size
# dict_epiOR[(m, simnumber)] = [OR at tstep0, OR at tstep1...]
# dict_epiOR_filt[(m, simnum)] = [OR for each time step for epidemics only where OR is nan when we want to exclude the time point due to small infected numbers]
# dict_epiresults[(m, simnumber)] = (episize, c_episize, a_episize)
# d_totepiOR[m] = [OR at sim1, OR at sim 2...]
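# (Reader's note, not part of the original script: the child:adult OR is an odds ratio
# of infection, roughly (I_c/(N_c - I_c)) / (I_a/(N_a - I_a)); the exact calculation is
# implemented in the local percolations module.)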
d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt, d_totepiOR = defaultdict(list), defaultdict(list), {}, defaultdict(list), defaultdict(list), defaultdict(list)
for m in Tmult_list:
processing = clock()
# reference filenames in zipfolder
Itstep_file = 'Results/Itstep_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.txt' %(numsims, b, m)
Rtstep_file = 'Results/Rtstep_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.txt' %(numsims, b, m)
# recreate epidata from zip archive
d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = perc.recreate_epidata2(Itstep_file, Rtstep_file, zipname, m, size_epi, ch, ad, to, sr, d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt)
# calculate OR over entire simulation
d_totepiOR[m] = perc.OR_sim(numsims, d_epiresults, m, chsz, adsz)
print m, "processed", clock() - processing
# grab unique list of Tmult values that produced at least one epidemic
Tmult_epi = list(set([key[0] for key in d_epiincid]))
##############################################
### plot total simulation AR with SD bars for children, adults, toddlers and the elderly vs T multiplier value
c_mns, c_sds, a_mns, a_sds = [],[],[],[]
d_mns, d_sds, s_mns, s_sds = [],[],[],[]
for m in sorted(Tmult_epi):
# attack rate per 100 by age group
C_episz_allsims = [sum(d_epiincid[key])/chsz for key in d_epiincid if key[0] == m and key[2] == 'C']
A_episz_allsims = [sum(d_epiincid[key])/adsz for key in d_epiincid if key[0] == m and key[2] == 'A']
D_episz_allsims = [sum(d_epiincid[key])/tosz for key in d_epiincid if key[0] == m and key[2] == 'D']
S_episz_allsims = [sum(d_epiincid[key])/srsz for key in d_epiincid if key[0] == m and key[2] == 'S']
# add mean and SD attack rates to list for each Tmult value
c_mns.append(np.mean(C_episz_allsims))
a_mns.append(np.mean(A_episz_allsims))
d_mns.append(np.mean(D_episz_allsims))
s_mns.append(np.mean(S_episz_allsims))
c_sds.append(np.std(C_episz_allsims))
a_sds.append(np.std(A_episz_allsims))
d_sds.append(np.std(D_episz_allsims))
s_sds.append(np.std(S_episz_allsims))
# plot AR by age group with errorbars
CH = plt.errorbar(sorted(Tmult_epi), c_mns, yerr = c_sds, marker = 'o', color = 'red', linestyle = 'None')
AD = plt.errorbar(sorted(Tmult_epi), a_mns, yerr = a_sds, marker = 'o', color = 'blue', linestyle = 'None')
TO = plt.errorbar(sorted(Tmult_epi), d_mns, yerr = d_sds, marker = 'o', color = 'orange', linestyle = 'None')
SR = plt.errorbar(sorted(Tmult_epi), s_mns, yerr = s_sds, marker = 'o', color = 'green', linestyle = 'None')
plt.xlabel('adult T multiplier (epidemics only)')
plt.ylabel('Attack Rate')
lines = [CH, AD, TO, SR]
plt.legend(lines, ['children (5-18)', 'adults (19-64)', 'toddlers (0-2)', 'seniors (65+)'], loc = 'upper left')
plt.xlim([1, 2])
plt.ylim([0, 1])
figname = 'Figures/HR-AR_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
# plt.show()
##############################################
### plot total simulation OR with std bars vs T multiplier value
plt.errorbar(sorted(Tmult_epi), [np.mean(d_totepiOR[m]) for m in sorted(Tmult_epi)], yerr = [np.std(d_totepiOR[m]) for m in sorted(Tmult_epi)], marker = 'o', color = 'black', linestyle = 'None')
plt.xlabel('adult T multiplier (epidemics only)')
plt.ylabel('simulation OR, child:adult')
plt.ylim([0, 4])
plt.xlim([1, 2])
figname = 'Figures/totepiOR_adultT-age_time_%ssims_beta%.3f_Tmult_vax0.png' %(numsims, b)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
##############################################
### plot avg of ORs at each tstep with std bars vs T multiplier value
mns, sds = [],[]
for m in sorted(Tmult_epi):
    mns_allsims = [np.mean(np.ma.masked_array(d_epiOR[key], np.isnan(d_epiOR[key]))) for key in d_epiOR if key[0] == m]
    mns.append(np.mean(mns_allsims))
    sds.append(np.std(mns_allsims))
plt.errorbar(sorted(Tmult_epi), mns, yerr = sds, marker = 'o', color = 'black', linestyle = 'None')
plt.xlabel('adult T multiplier (epidemics only)')
plt.ylabel('simulation OR (avg of avgs), child:adult')
plt.ylim([0, 5])
plt.xlim([1, 2])
figname = 'Figures/totepiOR-avgs_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
# plt.show()
##############################################
### plot filtered and aligned OR by time for each suscep value ###
# alignment at tstep where sim reaches 5% of total episize
# starting tstep on plot is mode of tsteps where sim reaches 5% of total episize
# each sim is one line; a separate plot is produced for each T multiplier value
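# Illustrative sketch only: 'example_align_tsteps' is a hypothetical helper that
# mimics what perc.define_epi_time (defined elsewhere) is assumed to compute for
# the alignment described above -- for one T multiplier value, the tstep at which
# each epidemic sim first reaches 5% of its final size. It is not called here.
def example_align_tsteps(d_epiincid, m, align_prop=0.05):
    tsteps = []
    for key in d_epiincid:
        # assumed key layout: (T multiplier, sim number, age class), 'T' = total incidence
        if key[0] == m and key[2] == 'T':
            cum = np.cumsum(d_epiincid[key])
            threshold = align_prop * cum[-1]
            tsteps.append(int(np.searchsorted(cum, threshold)))
    return tsteps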
for m in Tmult_epi:
ORonly = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
	# d_dummyalign_tstep[m] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, m, align_prop)
# TEST (11/19/13): realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# plot aligned data
	# zip T multiplier, episim number, and tstep for 5% cum-inf for sims where (m, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[m]):
plt.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
plt.plot(xrange(250), [1] * len(xrange(250)), marker = 'None', color = 'red', linewidth = 2)
plt.xlabel('epidemic time step, adult T mult: ' + str(m) + ', 5-95% cum infections')
plt.ylabel('OR, child:adult')
plt.ylim([0, 20])
plt.xlim([-1, 150])
figname = 'Figures/epiORalign_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
print "ORonly plotting time", m, clock() - ORonly
# plt.show()
##############################################
### plot filtered and aligned OR by time for each T multiplier value ###
### secondary axis with child and adult incidence ###
# alignment at tstep where sim reaches 5% of total episize
# starting tstep on plot is mode of tsteps where sim reaches 5% of total episize
# each sim is one line; a separate plot is produced for each T multiplier value
for m in Tmult_epi:
ORincid = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
	# d_dummyalign_tstep[m] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, m, align_prop)
# TEST (11/19/13): realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# PROCESS YAX_AR:
# call upon d_epiAR dictionary
	# d_epiAR[(m, simnumber, age)] = [attack rate at tstep 0, attack rate at tstep 1...], where age is 'T', 'C', or 'A' and the attack rate is per capita (scaled to per 100 when plotted below)
# plot data
# create two y-axes
fig, yax_OR = plt.subplots()
yax_AR = yax_OR.twinx()
	# zip T multiplier, episim number, and tstep for 5% cum-inf for sims where (m, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[m]):
## OR y-axis
OR, = yax_OR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
## AR y-axis
child, = yax_AR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiAR[(k0, k1, 'C')][t5:])), [AR * 100 for AR in d_epiAR[(k0, k1, 'C')][t5:]], marker = 'None', color = 'red')
adult, = yax_AR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiAR[(k0, k1, 'A')][t5:])), [AR * 100 for AR in d_epiAR[(k0, k1, 'A')][t5:]], marker = 'None', color = 'blue')
# plot settings
lines = [OR, child, adult]
yax_OR.legend(lines, ['Odds Ratio', 'Child Incidence', 'Adult Incidence'], loc = 'upper right')
yax_OR.set_ylabel('OR, child:adult')
yax_OR.set_ylim([0, 20])
yax_OR.set_xlim([-1, 150])
yax_OR.set_xlabel('epidemic time step, adult T multiplier: ' + str(m) + ', 5-95% cum infections')
yax_AR.set_ylabel('Incidence per 100')
yax_AR.set_ylim([0, 4])
# save plot
figname = 'Figures/epiORincid_adultT-age_time_%ssims_beta%.3f_Tmult%.1f_vax0.png' %(numsims, b, m)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
print "ORincid plotting time", m, clock() - ORincid
# plt.show()
| mit |
dquartul/BLonD | __EXAMPLES/mpi_main_files/EX_05_Wake_impedance.py | 2 | 12145 |
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
SPS simulation with intensity effects in time and frequency domains using
a table of resonators. The input beam has been cloned to show that the two
methods are equivalent (compare the two figure folders). Note that to create an
exact clone of the beam, the option seed=0 in the generation has been used.
This script shows also an example of how to use the class SliceMonitor (check
the corresponding h5 files).
:Authors: **Danilo Quartullo**
'''
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from blond.input_parameters.ring import Ring
from blond.input_parameters.rf_parameters import RFStation
from blond.trackers.tracker import RingAndRFTracker
from blond.beam.distributions import bigaussian
from blond.monitors.monitors import BunchMonitor
from blond.beam.profile import Profile, CutOptions, FitOptions
from blond.impedances.impedance import InducedVoltageTime, InducedVoltageFreq
from blond.impedances.impedance import InducedVoltageResonator, TotalInducedVoltage
from blond.impedances.induced_voltage_analytical import analytical_gaussian_resonator
from blond.beam.beam import Beam, Proton
from blond.plots.plot import Plot
from blond.plots.plot_impedance import plot_induced_voltage_vs_bin_centers
from blond.impedances.impedance_sources import Resonators
import os
from blond.utils import bmath as bm
from blond.utils.mpi_config import worker, mpiprint
bm.use_mpi()
print = mpiprint
this_directory = os.path.dirname(os.path.realpath(__file__)) + '/'
try:
os.mkdir(this_directory + '../mpi_output_files')
except OSError:
pass
try:
os.mkdir(this_directory + '../mpi_output_files/EX_05_fig')
except OSError:
pass
# SIMULATION PARAMETERS -------------------------------------------------------
# Beam parameters
n_particles = 1e10
n_macroparticles = 5*1e6
tau_0 = 2e-9 # [s]
# Machine and RF parameters
gamma_transition = 1/np.sqrt(0.00192) # [1]
C = 6911.56 # [m]
# Tracking details
n_turns = 2
dt_plt = 1
# Derived parameters
sync_momentum = 25.92e9 # [eV / c]
momentum_compaction = 1 / gamma_transition**2 # [1]
# Cavities parameters
n_rf_systems = 1
harmonic_number = 4620
voltage_program = 0.9e6 # [V]
phi_offset = 0.0
# DEFINE RING------------------------------------------------------------------
general_params = Ring(C, momentum_compaction,
sync_momentum, Proton(), n_turns)
general_params_freq = Ring(C, momentum_compaction,
sync_momentum, Proton(), n_turns)
general_params_res = Ring(C, momentum_compaction,
sync_momentum, Proton(), n_turns)
RF_sct_par = RFStation(general_params, [harmonic_number],
[voltage_program], [phi_offset], n_rf_systems)
RF_sct_par_freq = RFStation(general_params_freq,
[harmonic_number], [voltage_program],
[phi_offset], n_rf_systems)
RF_sct_par_res = RFStation(general_params_res,
[harmonic_number], [voltage_program],
[phi_offset], n_rf_systems)
my_beam = Beam(general_params, n_macroparticles, n_particles)
my_beam_freq = Beam(general_params_freq, n_macroparticles, n_particles)
my_beam_res = Beam(general_params_res, n_macroparticles, n_particles)
ring_RF_section = RingAndRFTracker(RF_sct_par, my_beam)
ring_RF_section_freq = RingAndRFTracker(RF_sct_par_freq, my_beam_freq)
ring_RF_section_res = RingAndRFTracker(RF_sct_par_res, my_beam_res)
# DEFINE BEAM------------------------------------------------------------------
bigaussian(general_params, RF_sct_par, my_beam, tau_0/4,
seed=1)
bigaussian(general_params_freq, RF_sct_par_freq, my_beam_freq,
tau_0/4, seed=1)
bigaussian(general_params_res, RF_sct_par_res, my_beam_res,
tau_0/4, seed=1)
number_slices = 2**8
cut_options = CutOptions(cut_left= 0, cut_right=2*np.pi, n_slices=number_slices,
RFSectionParameters=RF_sct_par, cuts_unit = 'rad')
slice_beam = Profile(my_beam, cut_options, FitOptions(fit_option='gaussian'))
cut_options_freq = CutOptions(cut_left= 0, cut_right=2*np.pi, n_slices=number_slices,
RFSectionParameters=RF_sct_par_freq, cuts_unit = 'rad')
slice_beam_freq = Profile(my_beam_freq, cut_options_freq, FitOptions(fit_option='gaussian'))
cut_options_res = CutOptions(cut_left= 0, cut_right=2*np.pi, n_slices=number_slices,
                       RFSectionParameters=RF_sct_par_res, cuts_unit = 'rad')
slice_beam_res = Profile(my_beam_res, cut_options_res, FitOptions(fit_option='gaussian'))
slice_beam.track()
slice_beam_freq.track()
slice_beam_res.track()
# LOAD IMPEDANCE TABLE--------------------------------------------------------
table = np.loadtxt(this_directory + '../input_files/EX_05_new_HQ_table.dat', comments = '!')
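# Assumed column layout of the impedance table (inferred from the unit scalings
# below): column 0 = resonant frequency [GHz], column 1 = quality factor,
# column 2 = shunt impedance [MOhm]; values are converted to Hz and Ohm here.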
R_shunt = table[:, 2] * 10**6
f_res = table[:, 0] * 10**9
Q_factor = table[:, 1]
resonator = Resonators(R_shunt, f_res, Q_factor)
ind_volt_time = InducedVoltageTime(my_beam, slice_beam, [resonator])
ind_volt_freq = InducedVoltageFreq(my_beam_freq, slice_beam_freq, [resonator], 1e5)
ind_volt_res = InducedVoltageResonator(my_beam_res,slice_beam_res,resonator)
tot_vol = TotalInducedVoltage(my_beam, slice_beam, [ind_volt_time])
tot_vol_freq = TotalInducedVoltage(my_beam_freq, slice_beam_freq,
[ind_volt_freq])
tot_vol_res = TotalInducedVoltage(my_beam_res, slice_beam_res,
[ind_volt_res])
# Analytic result-----------------------------------------------------------
VindGauss = np.zeros(len(slice_beam.bin_centers))
for r in range(len(Q_factor)):
# Notice that the time-argument of inducedVoltageGauss is shifted by
# mean(my_slices.bin_centers), because the analytical equation assumes the
# Gauss to be centered at t=0, but the line density is centered at
# mean(my_slices.bin_centers)
tmp = analytical_gaussian_resonator(tau_0/4, \
Q_factor[r],R_shunt[r],2*np.pi*f_res[r], \
slice_beam.bin_centers - np.mean(slice_beam.bin_centers), \
my_beam.intensity)
VindGauss += tmp.real
# ACCELERATION MAP-------------------------------------------------------------
map_ = [tot_vol] + [ring_RF_section] + [slice_beam]
map_freq = [tot_vol_freq] + [ring_RF_section_freq] + [slice_beam_freq]
map_res = [tot_vol_res] + [ring_RF_section_res] + [slice_beam_res]
if worker.isMaster:
# MONITOR----------------------------------------------------------------------
bunchmonitor = BunchMonitor(general_params, ring_RF_section, my_beam,
this_directory + '../mpi_output_files/EX_05_output_data',
Profile=slice_beam, buffer_time=1)
bunchmonitor_freq = BunchMonitor(general_params_freq, ring_RF_section_freq,
my_beam_freq, this_directory + '../mpi_output_files/EX_05_output_data_freq',
Profile=slice_beam_freq, buffer_time=1)
bunchmonitor_res = BunchMonitor(general_params_res, ring_RF_section_res,
my_beam_res, this_directory + '../mpi_output_files/EX_05_output_data_res',
Profile=slice_beam_res, buffer_time=1)
# PLOTS
format_options = {'dirname': this_directory + '../mpi_output_files/EX_05_fig/1', 'linestyle': '.'}
plots = Plot(general_params, RF_sct_par, my_beam, dt_plt, n_turns, 0,
0.0014*harmonic_number, -1.5e8, 1.5e8, xunit='rad',
separatrix_plot=True, Profile=slice_beam,
h5file=this_directory + '../mpi_output_files/EX_05_output_data',
histograms_plot=True, sampling=50, format_options=format_options)
format_options = {'dirname': this_directory + '../mpi_output_files/EX_05_fig/2', 'linestyle': '.'}
plots_freq = Plot(general_params_freq, RF_sct_par_freq, my_beam_freq, dt_plt,
n_turns, 0, 0.0014*harmonic_number, -1.5e8, 1.5e8,
xunit='rad', separatrix_plot=True, Profile=slice_beam_freq,
h5file=this_directory + '../mpi_output_files/EX_05_output_data_freq',
histograms_plot=True, sampling=50,
format_options=format_options)
format_options = {'dirname': this_directory + '../mpi_output_files/EX_05_fig/3', 'linestyle': '.'}
plots_res = Plot(general_params_res, RF_sct_par_res, my_beam_res, dt_plt,
n_turns, 0, 0.0014*harmonic_number, -1.5e8, 1.5e8,
xunit='rad', separatrix_plot=True, Profile=slice_beam_res,
h5file=this_directory + '../mpi_output_files/EX_05_output_data_res',
histograms_plot=True, sampling=50,
format_options=format_options)
map_ += [bunchmonitor, plots]
map_freq += [bunchmonitor_freq, plots_freq]
map_res += [bunchmonitor_res, plots_res]
# For testing purposes
test_string = ''
test_string += '{:<17}\t{:<17}\t{:<17}\t{:<17}\n'.format(
'mean_dE', 'std_dE', 'mean_dt', 'std_dt')
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(my_beam.dE), np.std(my_beam.dE), np.mean(my_beam.dt), np.std(my_beam.dt))
# TRACKING + PLOTS-------------------------------------------------------------
my_beam.split()
my_beam_freq.split()
my_beam_res.split()
for i in np.arange(1, n_turns+1):
print(i)
for m in map_:
m.track()
for m in map_freq:
m.track()
for m in map_res:
m.track()
# Plots
if (i % dt_plt) == 0 and (worker.isMaster):
plot_induced_voltage_vs_bin_centers(i, general_params, tot_vol,
style='.', dirname=this_directory + '../mpi_output_files/EX_05_fig/1')
plot_induced_voltage_vs_bin_centers(i, general_params_freq,
tot_vol_freq, style='.', dirname=this_directory + '../mpi_output_files/EX_05_fig/2')
plot_induced_voltage_vs_bin_centers(i, general_params_res,
tot_vol_res, style='.', dirname=this_directory + '../mpi_output_files/EX_05_fig/3')
my_beam.gather()
my_beam_freq.gather()
my_beam_res.gather()
worker.finalize()
# Plotting induced voltages---------------------------------------------------
plt.clf()
plt.ylabel("induced voltage [arb. unit]")
plt.xlabel("time [ns]")
plt.plot(1e9*slice_beam.bin_centers,tot_vol.induced_voltage,label='Time')
plt.plot(1e9*slice_beam_freq.bin_centers,tot_vol_freq.induced_voltage,\
label='Freq')
plt.plot(1e9*slice_beam_res.bin_centers,tot_vol_res.induced_voltage,\
label='Resonator')
plt.plot(1e9*slice_beam.bin_centers,VindGauss,label='Analytic')
plt.legend()
dirname=this_directory + '../mpi_output_files/EX_05_fig'
fign = dirname +'/comparison_induced_voltage.png'
plt.savefig(fign)
# For testing purposes
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(my_beam.dE), np.std(my_beam.dE), np.mean(my_beam.dt), np.std(my_beam.dt))
with open(this_directory + '../mpi_output_files/EX_05_test_data.txt', 'w') as f:
f.write(test_string)
print("Done!")
| gpl-3.0 |
devincornell/semanticanlysis | sentiment.py | 1 | 1029 | #import nltk
import sys
import pandas as pd
import numpy as np
import empath
import spacy
import topicmodels
import preprocessing
# configure program settings
if len(sys.argv) > 2:
datadir = sys.argv[1]
outfile = sys.argv[2]
else:
	print('format: sentiment.py data_folder/ outreport.xlsx')
exit()
# use the preprocessing module to load and pre-process the data
texts, fnames = preprocessing.parsetextfilesfromdir(datadir)
bows = preprocessing.tokenize_bow(texts)
# Empath lexical analyzer used for the category scoring below (assumed
# instantiation; 'anlz' is not defined anywhere else in this script)
anlz = empath.Empath()
dscores = [anlz.analyze(bow) for bow in bows]
#print(dscores)
scats = list(dscores[0].keys())
df = pd.DataFrame(index=fnames, columns=scats, dtype=np.int32)
for fname, dscore in zip(fnames,dscores):
df.loc[fname,:] = dscore
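# Build a category -> member-words lookup table below; this assumes anlz.cats
# maps each Empath category name to its list of seed terms.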
cdf = pd.DataFrame(index=scats, columns=range(max([len(anlz.cats[c]) for c in scats])))
for cat in scats:
words = anlz.cats[cat]
for i in range(len(words)):
cdf.loc[cat,i] = words[i]
writer = pd.ExcelWriter(outfile)
cdf.to_excel(writer,'categories')
df.to_excel(writer,'documents')
writer.save()
print(outfile, 'saved.')
| mit |
airbnb/caravel | tests/viz_tests.py | 1 | 42827 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import uuid
from mock import Mock, patch
import pandas as pd
from superset import app
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
import superset.viz as viz
from .base_tests import SupersetTestCase
from .utils import load_fixture
class BaseVizTestCase(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
'url_params': {},
'row_limit': 500,
'metric': 'sum__SP_POP_TOTL',
'entity': 'country_code',
'secondary_metric': 'sum__SP_POP_TOTL',
'granularity_sqla': 'year',
'page_length': 0,
'all_columns': [],
'viz_type': 'table',
'since': '2014-01-01',
'until': '2014-01-02',
'metrics': [
'sum__SP_POP_TOTL',
'SUM(SE_PRM_NENR_MA)',
'SUM(SP_URB_TOTL)',
],
'country_fieldtype': 'cca3',
'percent_metrics': [
'count',
],
'slice_id': 74,
'time_grain_sqla': None,
'order_by_cols': [],
'groupby': [
'country_name',
],
'compare_lag': '10',
'limit': '25',
'datasource': '2__table',
'table_timestamp_format': '%Y-%m-%d %H:%M:%S',
'markup_type': 'markdown',
'where': '',
'compare_suffix': 'o10Y',
}
datasource = Mock()
datasource.type = 'table'
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [u'sum__SP_POP_TOTL',
u'SUM(SE_PRM_NENR_MA)',
u'SUM(SP_URB_TOTL)',
u'count',
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_fillna_returns_default_on_null_columns(self):
form_data = {
'viz_type': 'table',
'token': '12345',
}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
self.assertEqual(
test_viz.default_fillna,
test_viz.get_fillna_for_columns(),
)
def test_get_df_returns_empty_df(self):
form_data = {'dummy': 123}
query_obj = {'granularity': 'day'}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {'dummy': 123}
query_obj = {'granularity': 'day'}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = 'table'
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_col = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ['1960-01-01 05:00:00']})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_col = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = 'epoch_ms'
result = test_viz.get_df(query_obj)
print(result)
import logging
logging.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS],
pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS),
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS],
pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS),
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS],
pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS),
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ['1960-01-01']})
mock_dttm_col.python_date_format = '%Y-%m-%d'
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS],
pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS),
)
def test_cache_timeout(self):
datasource = self.get_datasource_mock()
datasource.cache_timeout = 0
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(0, test_viz.cache_timeout)
datasource.cache_timeout = 156
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(156, test_viz.cache_timeout)
datasource.cache_timeout = None
datasource.database.cache_timeout = 0
self.assertEqual(0, test_viz.cache_timeout)
datasource.database.cache_timeout = 1666
self.assertEqual(1666, test_viz.cache_timeout)
datasource.database.cache_timeout = None
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(app.config['CACHE_DEFAULT_TIMEOUT'], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
def test_get_data_applies_percentage(self):
form_data = {
'percent_metrics': [{
'expressionType': 'SIMPLE',
'aggregate': 'SUM',
'label': 'SUM(value1)',
'column': {'column_name': 'value1', 'type': 'DOUBLE'},
}, 'avg__B'],
'metrics': [{
'expressionType': 'SIMPLE',
'aggregate': 'SUM',
'label': 'SUM(value1)',
'column': {'column_name': 'value1', 'type': 'DOUBLE'},
}, 'count', 'avg__C'],
}
datasource = self.get_datasource_mock()
raw = {}
raw['SUM(value1)'] = [15, 20, 25, 40]
raw['avg__B'] = [10, 20, 5, 15]
raw['avg__C'] = [11, 22, 33, 44]
raw['count'] = [6, 7, 8, 9]
raw['groupA'] = ['A', 'B', 'C', 'C']
raw['groupB'] = ['x', 'x', 'y', 'z']
df = pd.DataFrame(raw)
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data and computes percents
self.assertEqual(set([
'groupA', 'groupB', 'count',
'SUM(value1)', 'avg__C',
'%SUM(value1)', '%avg__B',
]), set(data['columns']))
expected = [
{
'groupA': 'A', 'groupB': 'x',
'count': 6, 'SUM(value1)': 15, 'avg__C': 11,
'%SUM(value1)': 0.15, '%avg__B': 0.2,
},
{
'groupA': 'B', 'groupB': 'x',
'count': 7, 'SUM(value1)': 20, 'avg__C': 22,
'%SUM(value1)': 0.2, '%avg__B': 0.4,
},
{
'groupA': 'C', 'groupB': 'y',
'count': 8, 'SUM(value1)': 25, 'avg__C': 33,
'%SUM(value1)': 0.25, '%avg__B': 0.1,
},
{
'groupA': 'C', 'groupB': 'z',
'count': 9, 'SUM(value1)': 40, 'avg__C': 44,
'%SUM(value1)': 0.40, '%avg__B': 0.3,
},
]
self.assertEqual(expected, data['records'])
def test_parse_adhoc_filters(self):
form_data = {
'metrics': [{
'expressionType': 'SIMPLE',
'aggregate': 'SUM',
'label': 'SUM(value1)',
'column': {'column_name': 'value1', 'type': 'DOUBLE'},
}],
'adhoc_filters': [
{
'expressionType': 'SIMPLE',
'clause': 'WHERE',
'subject': 'value2',
'operator': '>',
'comparator': '100',
},
{
'expressionType': 'SIMPLE',
'clause': 'HAVING',
'subject': 'SUM(value1)',
'operator': '<',
'comparator': '10',
},
{
'expressionType': 'SQL',
'clause': 'HAVING',
'sqlExpression': 'SUM(value1) > 5',
},
{
'expressionType': 'SQL',
'clause': 'WHERE',
'sqlExpression': 'value3 in (\'North America\')',
},
],
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{'col': 'value2', 'val': '100', 'op': '>'}],
query_obj['filter'],
)
self.assertEqual(
[{'op': '<', 'val': '10', 'col': 'SUM(value1)'}],
query_obj['extras']['having_druid'],
)
self.assertEqual('(value3 in (\'North America\'))', query_obj['extras']['where'])
self.assertEqual('(SUM(value1) > 5)', query_obj['extras']['having'])
def test_adhoc_filters_overwrite_legacy_filters(self):
form_data = {
'metrics': [{
'expressionType': 'SIMPLE',
'aggregate': 'SUM',
'label': 'SUM(value1)',
'column': {'column_name': 'value1', 'type': 'DOUBLE'},
}],
'adhoc_filters': [
{
'expressionType': 'SIMPLE',
'clause': 'WHERE',
'subject': 'value2',
'operator': '>',
'comparator': '100',
},
{
'expressionType': 'SQL',
'clause': 'WHERE',
'sqlExpression': 'value3 in (\'North America\')',
},
],
'having': 'SUM(value1) > 5',
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{'col': 'value2', 'val': '100', 'op': '>'}],
query_obj['filter'],
)
self.assertEqual(
[],
query_obj['extras']['having_druid'],
)
self.assertEqual('(value3 in (\'North America\'))', query_obj['extras']['where'])
self.assertEqual('', query_obj['extras']['having'])
@patch('superset.viz.BaseViz.query_obj')
def test_query_obj_merges_percent_metrics(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
'percent_metrics': ['sum__A', 'avg__B', 'max__Y'],
'metrics': ['sum__A', 'count', 'avg__C'],
}
test_viz = viz.TableViz(datasource, form_data)
f_query_obj = {
'metrics': form_data['metrics'],
}
super_query_obj.return_value = f_query_obj
query_obj = test_viz.query_obj()
self.assertEqual([
'sum__A', 'count', 'avg__C',
'avg__B', 'max__Y',
], query_obj['metrics'])
@patch('superset.viz.BaseViz.query_obj')
def test_query_obj_throws_columns_and_metrics(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
'all_columns': ['A', 'B'],
'metrics': ['x', 'y'],
}
super_query_obj.return_value = {}
test_viz = viz.TableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
del form_data['metrics']
form_data['groupby'] = ['B', 'C']
test_viz = viz.TableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
@patch('superset.viz.BaseViz.query_obj')
def test_query_obj_merges_all_columns(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
'all_columns': ['colA', 'colB', 'colC'],
'order_by_cols': ['["colA", "colB"]', '["colC"]'],
}
super_query_obj.return_value = {
'columns': ['colD', 'colC'],
'groupby': ['colA', 'colB'],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(form_data['all_columns'], query_obj['columns'])
self.assertEqual([], query_obj['groupby'])
self.assertEqual([['colA', 'colB'], ['colC']], query_obj['orderby'])
@patch('superset.viz.BaseViz.query_obj')
def test_query_obj_uses_sortby(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
'timeseries_limit_metric': '__time__',
'order_desc': False,
}
super_query_obj.return_value = {
'metrics': ['colA', 'colB'],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual([
'colA', 'colB', '__time__',
], query_obj['metrics'])
self.assertEqual([(
'__time__', True,
)], query_obj['orderby'])
def test_should_be_timeseries_raises_when_no_granularity(self):
datasource = self.get_datasource_mock()
form_data = {'include_time': True}
test_viz = viz.TableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.should_be_timeseries()
class PairedTTestTestCase(SupersetTestCase):
def test_get_data_transforms_dataframe(self):
form_data = {
'groupby': ['groupA', 'groupB', 'groupC'],
'metrics': ['metric1', 'metric2', 'metric3'],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types['paired_ttest'](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
'metric1': [
{
'values': [
{'x': 100, 'y': 1},
{'x': 200, 'y': 2},
{'x': 300, 'y': 3}],
'group': ('a1', 'a2', 'a3'),
},
{
'values': [
{'x': 100, 'y': 4},
{'x': 200, 'y': 5},
{'x': 300, 'y': 6}],
'group': ('b1', 'b2', 'b3'),
},
{
'values': [
{'x': 100, 'y': 7},
{'x': 200, 'y': 8},
{'x': 300, 'y': 9}],
'group': ('c1', 'c2', 'c3'),
},
],
'metric2': [
{
'values': [
{'x': 100, 'y': 10},
{'x': 200, 'y': 20},
{'x': 300, 'y': 30}],
'group': ('a1', 'a2', 'a3'),
},
{
'values': [
{'x': 100, 'y': 40},
{'x': 200, 'y': 50},
{'x': 300, 'y': 60}],
'group': ('b1', 'b2', 'b3'),
},
{
'values': [
{'x': 100, 'y': 70},
{'x': 200, 'y': 80},
{'x': 300, 'y': 90}],
'group': ('c1', 'c2', 'c3'),
},
],
'metric3': [
{
'values': [
{'x': 100, 'y': 100},
{'x': 200, 'y': 200},
{'x': 300, 'y': 300}],
'group': ('a1', 'a2', 'a3'),
},
{
'values': [
{'x': 100, 'y': 400},
{'x': 200, 'y': 500},
{'x': 300, 'y': 600}],
'group': ('b1', 'b2', 'b3'),
},
{
'values': [
{'x': 100, 'y': 700},
{'x': 200, 'y': 800},
{'x': 300, 'y': 900}],
'group': ('c1', 'c2', 'c3'),
},
],
}
self.assertEqual(data, expected)
def test_get_data_empty_null_keys(self):
form_data = {
'groupby': [],
'metrics': ['', None],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300]
raw[''] = [1, 2, 3]
raw[None] = [10, 20, 30]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types['paired_ttest'](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
'N/A': [
{
'values': [
{'x': 100, 'y': 1},
{'x': 200, 'y': 2},
{'x': 300, 'y': 3}],
'group': 'All',
},
],
'NULL': [
{
'values': [
{'x': 100, 'y': 10},
{'x': 200, 'y': 20},
{'x': 300, 'y': 30}],
'group': 'All',
},
],
}
self.assertEqual(data, expected)
class PartitionVizTestCase(SupersetTestCase):
@patch('superset.viz.BaseViz.query_obj')
def test_query_obj_time_series_option(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {}
test_viz = viz.PartitionViz(datasource, form_data)
super_query_obj.return_value = {}
query_obj = test_viz.query_obj()
self.assertFalse(query_obj['is_timeseries'])
test_viz.form_data['time_series_option'] = 'agg_sum'
query_obj = test_viz.query_obj()
self.assertTrue(query_obj['is_timeseries'])
def test_levels_for_computes_levels(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ['groupA', 'groupB', 'groupC']
time_op = 'agg_sum'
test_viz = viz.PartitionViz(Mock(), {})
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 1800,
'metric1': 45,
'metric2': 450,
'metric3': 4500,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {'a1': 600, 'b1': 600, 'c1': 600},
'metric1': {'a1': 6, 'b1': 15, 'c1': 24},
'metric2': {'a1': 60, 'b1': 150, 'c1': 240},
'metric3': {'a1': 600, 'b1': 1500, 'c1': 2400},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(['groupA', 'groupB'], levels[2].index.names)
self.assertEqual(
['groupA', 'groupB', 'groupC'],
levels[3].index.names,
)
time_op = 'agg_mean'
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 200.0,
'metric1': 5.0,
'metric2': 50.0,
'metric3': 500.0,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {'a1': 200, 'c1': 200, 'b1': 200},
'metric1': {'a1': 2, 'b1': 5, 'c1': 8},
'metric2': {'a1': 20, 'b1': 50, 'c1': 80},
'metric3': {'a1': 200, 'b1': 500, 'c1': 800},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(['groupA', 'groupB'], levels[2].index.names)
self.assertEqual(
['groupA', 'groupB', 'groupC'],
levels[3].index.names,
)
def test_levels_for_diff_computes_difference(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ['groupA', 'groupB', 'groupC']
test_viz = viz.PartitionViz(Mock(), {})
time_op = 'point_diff'
levels = test_viz.levels_for_diff(time_op, groups, df)
expected = {
'metric1': 6,
'metric2': 60,
'metric3': 600,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
'metric1': {'a1': 2, 'b1': 2, 'c1': 2},
'metric2': {'a1': 20, 'b1': 20, 'c1': 20},
'metric3': {'a1': 200, 'b1': 200, 'c1': 200},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(4, len(levels))
self.assertEqual(['groupA', 'groupB', 'groupC'], levels[3].index.names)
def test_levels_for_time_calls_process_data_and_drops_cols(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ['groupA', 'groupB', 'groupC']
test_viz = viz.PartitionViz(Mock(), {'groupby': groups})
def return_args(df_drop, aggregate):
return df_drop
test_viz.process_data = Mock(side_effect=return_args)
levels = test_viz.levels_for_time(groups, df)
self.assertEqual(4, len(levels))
cols = [DTTM_ALIAS, 'metric1', 'metric2', 'metric3']
self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
cols += ['groupA']
self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
cols += ['groupB']
self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
cols += ['groupC']
self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
self.assertEqual(4, len(test_viz.process_data.mock_calls))
def test_nest_values_returns_hierarchy(self):
raw = {}
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ['groupA', 'groupB', 'groupC']
levels = test_viz.levels_for('agg_sum', groups, df)
nest = test_viz.nest_values(levels)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual('metric' + str(i + 1), nest[i]['name'])
self.assertEqual(3, len(nest[0]['children']))
self.assertEqual(1, len(nest[0]['children'][0]['children']))
self.assertEqual(1, len(nest[0]['children'][0]['children'][0]['children']))
def test_nest_procs_returns_hierarchy(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw['metric2'] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw['metric3'] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ['groupA', 'groupB', 'groupC']
metrics = ['metric1', 'metric2', 'metric3']
procs = {}
for i in range(0, 4):
df_drop = df.drop(groups[i:], 1)
pivot = df_drop.pivot_table(
index=DTTM_ALIAS,
columns=groups[:i],
values=metrics,
)
procs[i] = pivot
nest = test_viz.nest_procs(procs)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual('metric' + str(i + 1), nest[i]['name'])
self.assertEqual(None, nest[i].get('val'))
self.assertEqual(3, len(nest[0]['children']))
self.assertEqual(3, len(nest[0]['children'][0]['children']))
self.assertEqual(1, len(nest[0]['children'][0]['children'][0]['children']))
self.assertEqual(
1,
len(nest[0]['children']
[0]['children']
[0]['children']
[0]['children']),
)
def test_get_data_calls_correct_method(self):
test_viz = viz.PartitionViz(Mock(), {})
df = Mock()
with self.assertRaises(ValueError):
test_viz.get_data(df)
test_viz.levels_for = Mock(return_value=1)
test_viz.nest_values = Mock(return_value=1)
test_viz.form_data['groupby'] = ['groups']
test_viz.form_data['time_series_option'] = 'not_time'
test_viz.get_data(df)
self.assertEqual('agg_sum', test_viz.levels_for.mock_calls[0][1][0])
test_viz.form_data['time_series_option'] = 'agg_sum'
test_viz.get_data(df)
self.assertEqual('agg_sum', test_viz.levels_for.mock_calls[1][1][0])
test_viz.form_data['time_series_option'] = 'agg_mean'
test_viz.get_data(df)
self.assertEqual('agg_mean', test_viz.levels_for.mock_calls[2][1][0])
test_viz.form_data['time_series_option'] = 'point_diff'
test_viz.levels_for_diff = Mock(return_value=1)
test_viz.get_data(df)
self.assertEqual('point_diff', test_viz.levels_for_diff.mock_calls[0][1][0])
test_viz.form_data['time_series_option'] = 'point_percent'
test_viz.get_data(df)
self.assertEqual('point_percent', test_viz.levels_for_diff.mock_calls[1][1][0])
test_viz.form_data['time_series_option'] = 'point_factor'
test_viz.get_data(df)
self.assertEqual('point_factor', test_viz.levels_for_diff.mock_calls[2][1][0])
test_viz.levels_for_time = Mock(return_value=1)
test_viz.nest_procs = Mock(return_value=1)
test_viz.form_data['time_series_option'] = 'adv_anal'
test_viz.get_data(df)
self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
test_viz.form_data['time_series_option'] = 'time_series'
test_viz.get_data(df)
self.assertEqual('agg_sum', test_viz.levels_for.mock_calls[3][1][0])
self.assertEqual(7, len(test_viz.nest_values.mock_calls))
class RoseVisTestCase(SupersetTestCase):
def test_rose_vis_get_data(self):
raw = {}
t1 = pd.Timestamp('2000')
t2 = pd.Timestamp('2002')
t3 = pd.Timestamp('2004')
raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
raw['groupA'] = ['a1', 'a1', 'a1', 'b1', 'b1', 'b1', 'c1', 'c1', 'c1']
raw['groupB'] = ['a2', 'a2', 'a2', 'b2', 'b2', 'b2', 'c2', 'c2', 'c2']
raw['groupC'] = ['a3', 'a3', 'a3', 'b3', 'b3', 'b3', 'c3', 'c3', 'c3']
raw['metric1'] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df = pd.DataFrame(raw)
fd = {
'metrics': ['metric1'],
'groupby': ['groupA'],
}
test_viz = viz.RoseViz(Mock(), fd)
test_viz.metrics = fd['metrics']
res = test_viz.get_data(df)
expected = {
946684800000000000: [
{'time': t1, 'value': 1, 'key': ('a1',), 'name': ('a1',)},
{'time': t1, 'value': 4, 'key': ('b1',), 'name': ('b1',)},
{'time': t1, 'value': 7, 'key': ('c1',), 'name': ('c1',)},
],
1009843200000000000: [
{'time': t2, 'value': 2, 'key': ('a1',), 'name': ('a1',)},
{'time': t2, 'value': 5, 'key': ('b1',), 'name': ('b1',)},
{'time': t2, 'value': 8, 'key': ('c1',), 'name': ('c1',)},
],
1072915200000000000: [
{'time': t3, 'value': 3, 'key': ('a1',), 'name': ('a1',)},
{'time': t3, 'value': 6, 'key': ('b1',), 'name': ('b1',)},
{'time': t3, 'value': 9, 'key': ('c1',), 'name': ('c1',)},
],
}
self.assertEqual(expected, res)
class TimeSeriesTableVizTestCase(SupersetTestCase):
def test_get_data_metrics(self):
form_data = {
'metrics': ['sum__A', 'count'],
'groupby': [],
}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp('2000')
t2 = pd.Timestamp('2002')
raw[DTTM_ALIAS] = [t1, t2]
raw['sum__A'] = [15, 20]
raw['count'] = [6, 7]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(['count', 'sum__A']), set(data['columns']))
time_format = '%Y-%m-%d %H:%M:%S'
expected = {
t1.strftime(time_format): {
'sum__A': 15,
'count': 6,
},
t2.strftime(time_format): {
'sum__A': 20,
'count': 7,
},
}
self.assertEqual(expected, data['records'])
def test_get_data_group_by(self):
form_data = {
'metrics': ['sum__A'],
'groupby': ['groupby1'],
}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp('2000')
t2 = pd.Timestamp('2002')
raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
raw['sum__A'] = [15, 20, 25, 30, 35, 40]
raw['groupby1'] = ['a1', 'a2', 'a3', 'a1', 'a2', 'a3']
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(['a1', 'a2', 'a3']), set(data['columns']))
time_format = '%Y-%m-%d %H:%M:%S'
expected = {
t1.strftime(time_format): {
'a1': 15,
'a2': 20,
'a3': 25,
},
t2.strftime(time_format): {
'a1': 30,
'a2': 35,
'a3': 40,
},
}
self.assertEqual(expected, data['records'])
@patch('superset.viz.BaseViz.query_obj')
def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
'groupby': ['a'],
}
super_query_obj.return_value = {}
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
form_data['metrics'] = ['x', 'y']
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
class BaseDeckGLVizTestCase(SupersetTestCase):
def test_get_metrics(self):
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == [form_data.get('size')]
form_data = {}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == []
def test_scatterviz_get_metrics(self):
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {'type': 'metric', 'value': 'int'}
result = test_viz_deckgl.get_metrics()
assert result == ['int']
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {}
result = test_viz_deckgl.get_metrics()
assert result is None
def test_get_js_columns(self):
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
mock_d = {
'a': 'dummy1',
'b': 'dummy2',
'c': 'dummy3',
}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_js_columns(mock_d)
assert result == {'color': None}
def test_get_properties(self):
mock_d = {}
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(NotImplementedError) as context:
test_viz_deckgl.get_properties(mock_d)
self.assertTrue('' in str(context.exception))
def test_process_spatial_query_obj(self):
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
mock_key = 'spatial_key'
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(ValueError) as context:
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
self.assertTrue('Bad spatial key' in str(context.exception))
test_form_data = {
'latlong_key': {
'type': 'latlong',
'lonCol': 'lon',
'latCol': 'lat',
},
'delimited_key': {
'type': 'delimited',
'lonlatCol': 'lonlat',
},
'geohash_key': {
'type': 'geohash',
'geohashCol': 'geo',
},
}
datasource = self.get_datasource_mock()
expected_results = {
'latlong_key': ['lon', 'lat'],
'delimited_key': ['lonlat'],
'geohash_key': ['geo'],
}
for mock_key in ['latlong_key', 'delimited_key', 'geohash_key']:
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
assert expected_results.get(mock_key) == mock_gb
def test_geojson_query_obj(self):
form_data = load_fixture('deck_geojson_form_data.json')
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
results = test_viz_deckgl.query_obj()
assert results['metrics'] == []
assert results['groupby'] == []
assert results['columns'] == ['test_col']
def test_parse_coordinates(self):
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
viz_instance = viz.BaseDeckGLViz(datasource, form_data)
coord = viz_instance.parse_coordinates('1.23, 3.21')
self.assertEquals(coord, (1.23, 3.21))
coord = viz_instance.parse_coordinates('1.23 3.21')
self.assertEquals(coord, (1.23, 3.21))
self.assertEquals(viz_instance.parse_coordinates(None), None)
self.assertEquals(viz_instance.parse_coordinates(''), None)
def test_parse_coordinates_raises(self):
form_data = load_fixture('deck_path_form_data.json')
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates('NULL')
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates('fldkjsalkj,fdlaskjfjadlksj')
@patch('superset.utils.core.uuid.uuid4')
def test_filter_nulls(self, mock_uuid4):
mock_uuid4.return_value = uuid.UUID('12345678123456781234567812345678')
test_form_data = {
'latlong_key': {
'type': 'latlong',
'lonCol': 'lon',
'latCol': 'lat',
},
'delimited_key': {
'type': 'delimited',
'lonlatCol': 'lonlat',
},
'geohash_key': {
'type': 'geohash',
'geohashCol': 'geo',
},
}
datasource = self.get_datasource_mock()
expected_results = {
'latlong_key': [{
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '12345678-1234-5678-1234-567812345678',
'comparator': '',
'operator': 'IS NOT NULL',
'subject': 'lat',
}, {
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '12345678-1234-5678-1234-567812345678',
'comparator': '',
'operator': 'IS NOT NULL',
'subject': 'lon',
}],
'delimited_key': [{
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '12345678-1234-5678-1234-567812345678',
'comparator': '',
'operator': 'IS NOT NULL',
'subject': 'lonlat',
}],
'geohash_key': [{
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '12345678-1234-5678-1234-567812345678',
'comparator': '',
'operator': 'IS NOT NULL',
'subject': 'geo',
}],
}
for mock_key in ['latlong_key', 'delimited_key', 'geohash_key']:
test_viz_deckgl = viz.BaseDeckGLViz(
datasource, test_form_data.copy())
test_viz_deckgl.spatial_control_keys = [mock_key]
test_viz_deckgl.add_null_filters()
adhoc_filters = test_viz_deckgl.form_data['adhoc_filters']
assert expected_results.get(mock_key) == adhoc_filters
class TimeSeriesVizTestCase(SupersetTestCase):
def test_timeseries_unicode_data(self):
datasource = self.get_datasource_mock()
form_data = {
'groupby': ['name'],
'metrics': ['sum__payout'],
}
raw = {}
raw['name'] = [
'Real Madrid C.F.🇺🇸🇬🇧', 'Real Madrid C.F.🇺🇸🇬🇧',
'Real Madrid Basket', 'Real Madrid Basket',
]
raw['__timestamp'] = [
'2018-02-20T00:00:00', '2018-03-09T00:00:00',
'2018-02-20T00:00:00', '2018-03-09T00:00:00',
]
raw['sum__payout'] = [2, 2, 4, 4]
df = pd.DataFrame(raw)
test_viz = viz.NVD3TimeSeriesViz(datasource, form_data)
viz_data = {}
viz_data = test_viz.get_data(df)
expected = [
{u'values': [
{u'y': 4, u'x': u'2018-02-20T00:00:00'},
{u'y': 4, u'x': u'2018-03-09T00:00:00'}],
u'key': (u'Real Madrid Basket',)},
{u'values': [
{u'y': 2, u'x': u'2018-02-20T00:00:00'},
{u'y': 2, u'x': u'2018-03-09T00:00:00'}],
u'key': (u'Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7',)},
]
self.assertEqual(expected, viz_data)
| apache-2.0 |
arborh/tensorflow | tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/captured_data_to_wav.py | 11 | 1442 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts values pulled from the microcontroller into audio files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
# import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
def new_data_to_array(fn):
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
  # Pack the parsed byte values into a bytes object so struct.unpack also
  # accepts it under Python 3 (where it rejects str), then decode the buffer
  # as little-endian int16 samples.
  b = bytes(bytearray(vals))
  y = struct.unpack('<' + 'h' * (len(b) // 2), b)
return y
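def _example_round_trip():
  # Illustrative helper only (not called by this script): demonstrates the
  # assumed dump format -- a header line that new_data_to_array skips, then
  # lines of space-separated hex bytes reassembled as little-endian int16.
  import os
  import tempfile
  with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('DUMP START\n00 01 00 02 ff ff\n')
    name = tmp.name
  try:
    return new_data_to_array(name)  # expected: (256, 512, -1)
  finally:
    os.remove(name)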
data = 'captured_data.txt'
values = np.array(new_data_to_array(data)).astype(float)
# plt.plot(values, 'o-')
# plt.show(block=False)
wav = values / np.max(np.abs(values))
sf.write('captured_data.wav', wav, 16000)
| apache-2.0 |
fbagirov/scikit-learn | sklearn/neighbors/classification.py | 106 | 13987 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
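    A callable may also be given for ``weights``; it receives the array of
    neighbor distances and must return an array of the same shape (the
    following snippet is illustrative only, with made-up names):
    >>> def inverse_distance(dist):
    ...     return 1.0 / (dist + 1e-6)
    >>> neigh_w = KNeighborsClassifier(n_neighbors=3, weights=inverse_distance)
    >>> neigh_w.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)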
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label assigned to outlier samples (samples with no neighbors
within the given radius).
If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
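# --- Illustrative usage sketch (added for clarity; not part of the source) --
# The ``outlier_label`` logic above means that a query point with no training
# neighbors inside ``radius`` either raises a ValueError (the default) or is
# silently assigned the given label.  A minimal, self-contained demonstration;
# the function name and data are assumptions for illustration only.
def _sketch_radius_outlier_label():
    import numpy as np
    from sklearn.neighbors import RadiusNeighborsClassifier

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])

    clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
    clf.fit(X, y)
    # [0.5] is within radius 1.0 of the two class-0 points, while [10.0] has
    # no neighbors at all and therefore receives the outlier label -1.
    return clf.predict([[0.5], [10.0]])          # should be array([ 0, -1])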
ClimbsRocks/scikit-learn | sklearn/linear_model/ridge.py | 12 | 50402 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
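# Hypothetical self-check (not part of the module): scipy's lsqr solves the
# damped problem min ||X w - y||^2 + damp^2 ||w||^2, which is why the code
# above passes damp = sqrt(alpha) to obtain the ridge solution.
def _sketch_check_lsqr_damp():
    import numpy as np
    from scipy.sparse import linalg as sp_linalg

    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = rng.randn(30)
    alpha = 2.0

    w_lsqr = sp_linalg.lsqr(X, y, damp=np.sqrt(alpha))[0]
    w_ridge = np.linalg.solve(X.T.dot(X) + alpha * np.eye(4), X.T.dot(y))
    return np.allclose(w_lsqr, w_ridge, atol=1e-6)   # True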
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, so we can solve the multi-target problem at once.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
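# Hypothetical self-check (not part of the module): the kernel ("dual") route
# used above relies on the push-through identity
# (X^T X + alpha I)^-1 X^T = X^T (X X^T + alpha I)^-1, so mapping the dual
# coefficients back through X^T recovers the ordinary ridge weights.
def _sketch_check_primal_dual():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(10, 25)                        # n_features > n_samples
    y = rng.randn(10)
    alpha = 1.0

    dual = np.linalg.solve(X.dot(X.T) + alpha * np.eye(10), y)
    w_dual = X.T.dot(dual)
    w_primal = np.linalg.solve(X.T.dot(X) + alpha * np.eye(25), X.T.dot(y))
    return np.allclose(w_dual, w_primal)         # True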
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
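# Hypothetical self-check (not part of the module): the SVD "filter factor"
# formula above, d = s / (s**2 + alpha), yields the same coefficients as the
# closed-form solution (X^T X + alpha I)^-1 X^T y on a small dense problem.
def _sketch_check_svd_filter_factors():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    alpha = 0.7

    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    d = s / (s ** 2 + alpha)
    w_svd = Vt.T.dot(d * U.T.dot(y))

    w_normal = np.linalg.solve(X.T.dot(X) + alpha * np.eye(5), X.T.dot(y))
    return np.allclose(w_svd, w_normal)          # True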
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers all support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
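# --- Illustrative usage sketch (added for clarity; not part of the module) --
# ``ridge_regression`` (also exposed as sklearn.linear_model.ridge_regression)
# is the functional counterpart of the Ridge estimator.  As the docstring
# notes, it never fits an intercept, so callers are expected to center the
# data themselves.  The helper name and data below are assumptions.
def _sketch_ridge_regression_call():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X.dot([1.0, -2.0, 0.5]) + 0.01 * rng.randn(50)

    # Center manually because ridge_regression does not fit an intercept.
    Xc = X - X.mean(axis=0)
    yc = y - y.mean()
    coef = ridge_regression(Xc, yc, alpha=1.0)
    return coef                    # approximately [1.0, -2.0, 0.5]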
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers all support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We do not construct the matrix G explicitly; instead we compute its action on y and its diagonal.
"""
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
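# Hypothetical numerical check (not part of the module) of the identity that
# _RidgeGCV exploits above: with G = (K + alpha I)^-1 and c = G y, the
# leave-one-out residual of sample i equals c[i] / G[i, i] for ridge without
# an intercept, so all n LOO errors come from a single factorization.
def _sketch_check_loo_identity():
    import numpy as np

    rng = np.random.RandomState(0)
    n, p, alpha = 12, 4, 1.0
    X = rng.randn(n, p)
    y = rng.randn(n)

    K = X.dot(X.T)
    G = np.linalg.inv(K + alpha * np.eye(n))
    c = G.dot(y)
    looe_formula = c / np.diag(G)

    # Brute force: refit without sample i and predict on sample i.
    looe_brute = np.empty(n)
    for i in range(n):
        mask = np.arange(n) != i
        Xi, yi = X[mask], y[mask]
        w = np.linalg.solve(Xi.T.dot(Xi) + alpha * np.eye(p), Xi.T.dot(yi))
        looe_brute[i] = y[i] - X[i].dot(w)
    return np.allclose(looe_formula, looe_brute)   # True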
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
opengridcc/opengrid | opengrid/library/utils.py | 1 | 1423 | # -*- coding: utf-8 -*-
"""
General util functions
"""
import datetime
import pandas as pd
def week_schedule(index, on_time=None, off_time=None, off_days=None):
""" Return boolean time series following given week schedule.
Parameters
----------
index : pandas.DatetimeIndex
Datetime index
on_time : str or datetime.time
Daily opening time. Default: '09:00'
off_time : str or datetime.time
Daily closing time. Default: '17:00'
off_days : list of str
List of weekdays. Default: ['Sunday', 'Monday']
Returns
-------
pandas.Series of bool
True when on, False otherwise for given datetime index
Examples
--------
>>> import pandas as pd
>>> from opengrid.library.utils import week_schedule
>>> index = pd.date_range('20170701', '20170710', freq='H')
>>> week_schedule(index)
"""
if on_time is None:
on_time = '9:00'
if off_time is None:
off_time = '17:00'
if off_days is None:
off_days = ['Sunday', 'Monday']
if not isinstance(on_time, datetime.time):
on_time = pd.to_datetime(on_time, format='%H:%M').time()
if not isinstance(off_time, datetime.time):
off_time = pd.to_datetime(off_time, format='%H:%M').time()
times = (index.time >= on_time) & (index.time < off_time) & (~index.weekday_name.isin(off_days))
return pd.Series(times, index=index)
| apache-2.0 |
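# --- Illustrative usage sketch (added for clarity; not part of the source) --
# ``week_schedule`` returns a boolean Series aligned with the index, so it can
# be used directly as a mask, e.g. to keep only measurements taken during
# opening hours.  The variable names and schedule below are assumptions.
def _sketch_week_schedule_mask():
    import numpy as np
    import pandas as pd
    from opengrid.library.utils import week_schedule

    index = pd.date_range('2017-07-01', '2017-07-10', freq='H')
    consumption = pd.Series(np.random.rand(len(index)), index=index)

    is_open = week_schedule(index, on_time='08:30', off_time='18:00',
                            off_days=['Saturday', 'Sunday'])
    return consumption[is_open].mean()   # mean consumption while open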
Barmaley-exe/scikit-learn | sklearn/lda.py | 6 | 17656 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be a string or a number (int or float)')
return s
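# Hypothetical illustration (not part of the module) of the three shrinkage
# modes accepted by ``_cov`` above.  With fewer samples than features the
# empirical estimate is singular, and both fixed and Ledoit-Wolf shrinkage
# pull it towards a well-conditioned target.
def _sketch_compare_shrinkage():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(15, 40)                        # n_samples < n_features

    s_emp = _cov(X)                              # empirical covariance
    s_fixed = _cov(X, shrinkage=0.5)             # fixed shrinkage coefficient
    s_auto = _cov(X, shrinkage='auto')           # Ledoit-Wolf estimate
    # Shrinkage should reduce the condition number dramatically.
    return [np.linalg.cond(s) for s in (s_emp, s_fixed, s_auto)]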
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within-class) scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
elif self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
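# --- Illustrative sketch (not part of the original module) ---
# A hedged, standalone numpy illustration of the probability conversion used
# in predict_proba() above: a logistic transform of the decision values
# followed by one-vs-rest renormalisation. The decision values below are
# made up for the example.
def _demo_ovr_probabilities():
    import numpy as np
    decision = np.array([[2.0, -1.0, 0.5],
                         [-0.5, 1.5, 0.0]])  # hypothetical decision_function output
    prob = 1.0 / (1.0 + np.exp(-decision))   # same as the exp/reciprocal chain above
    prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))  # OvR normalisation
    return prob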
| bsd-3-clause |
artmusic0/theano-learning.part03 | Myfile_run-py_big-taining/cnn_trainingbig.py | 1 | 6461 | import os
import sys, getopt
import time
import numpy
import theano
import theano.tensor as T
from sklearn import preprocessing
from cnn import CNN
import cPickle
from logistic_sgd import LogisticRegression
import gzip
def fit(data, labels, filename = 'weights.pkl'):
fit_predict(data, labels, filename = filename, action = 'fit')
def fit_predict(data, labels, action, filename, test_datasets = [], learning_rate=0.1, n_epochs=100, nkerns=[20, 50, 90], batch_size=50, seed=8000):
rng = numpy.random.RandomState(seed)
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
index = T.lscalar() # index to a [mini]batch
if action=='fit':
TRAIN_Count = 1
NUM_TRAIN = len(data)
#print NUM_TRAIN
#print batch_size
if NUM_TRAIN % batch_size != 0: #if the last batch is not full, just don't use the remainder
whole = (NUM_TRAIN / batch_size) * batch_size
data = data[:whole]
NUM_TRAIN = len(data)
#print NUM_TRAIN
#print batch_size
# random permutation
indices = rng.permutation(NUM_TRAIN)
data, labels = data[indices, :], labels[indices]
# for every batch of `batch_size` shuffled samples, hold out the last 20 for validation and train on the rest (e.g. 30 train / 20 valid with the default batch_size of 50)
is_train = numpy.array( ([0]* (batch_size - 20) + [1] * 20) * (NUM_TRAIN / batch_size))
# now we split the dataset to test and valid datasets
train_set_x, train_set_y = numpy.array(data[is_train==0]), labels[is_train==0]
valid_set_x, valid_set_y = numpy.array(data[is_train==1]), labels[is_train==1]
# compute number of minibatches
n_train_batches = len(train_set_y) / batch_size
n_valid_batches = len(valid_set_y) / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
epoch = T.scalar()
#index = T.lscalar() # index to a [mini]batch
#x = T.matrix('x') # the data is presented as rasterized images
#y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
# construct the CNN class
classifier = CNN(
rng=rng,
input=x,
nkerns = nkerns,
batch_size = batch_size
)
train_set_x = theano.shared(numpy.asarray(train_set_x, dtype=theano.config.floatX))
train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y, dtype=theano.config.floatX)), 'int32')
valid_set_x = theano.shared(numpy.asarray(valid_set_x, dtype=theano.config.floatX))
valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y, dtype=theano.config.floatX)), 'int32')
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
cost = classifier.layer4.negative_log_likelihood(y)
# create a list of gradients for all model parameters
grads = T.grad(cost, classifier.params)
# specify how to update the parameters of the model as a list of (variable, update expression) pairs
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(classifier.params, grads)
]
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
while(TRAIN_Count <51):
if(TRAIN_Count != 1):
print '...load data', TRAIN_Count
f = gzip.open(("training_data_200v6_" + str(TRAIN_Count) +".pkl.gz"), 'rb')
train = cPickle.load(f)
f.close()
data, labels = train
###############
# TRAIN MODEL #
###############
print '... training',TRAIN_Count,'batch'
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
# here is an example how to print the current value of a Theano variable: print test_set_x.shape.eval()
# start training
while (epoch < n_epochs):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (epoch) % 1 == 0 and minibatch_index==0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
TRAIN_Count += 1
###############
# PREDICTIONS #
###############
# save and load
print '... saving the weight'
f = file(filename, 'wb')
cPickle.dump(classifier.__getstate__(), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
end_time = time.clock()
print >> sys.stderr, ('The code ran for %.2fm' % ((end_time - start_time) / 60.))
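# --- Illustrative sketch (not part of the original script) ---
# A hedged, standalone illustration of the train/validation split built inside
# fit_predict() above: for every batch of `batch_size` shuffled samples the
# last 20 are flagged for validation. The sizes used here are made up.
def _demo_train_valid_split(num_train=200, batch_size=50):
    rng = numpy.random.RandomState(0)
    indices = rng.permutation(num_train)
    is_train = numpy.array(([0] * (batch_size - 20) + [1] * 20) * (num_train // batch_size))
    train_idx = indices[is_train == 0]   # 30 of every 50 samples by default
    valid_idx = indices[is_train == 1]   # the remaining 20
    return train_idx, valid_idx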
| gpl-3.0 |
boada/HETDEXCluster | analysis/mkMLMasses.py | 2 | 8033 | import numpy as np
import h5py as hdf
from sklearn.ensemble import RandomForestRegressor
#from sklearn.cross_validation import train_test_split
from numpy.lib import recfunctions as rfns
from itertools import permutations
import multiprocessing
def child_initializer(_rf):
print('Starting', multiprocessing.current_process().name)
global model
model = _rf
def updateArray(data):
''' Adds the results containers to the data product. '''
newData = np.zeros(data.size)
data = rfns.append_fields(
data, ['ML_pred_1d', 'ML_pred_2d', 'ML_pred_2d2', 'ML_pred_3d',
'ML_pred_1d_err', 'ML_pred_2d_err', 'ML_pred_2d2_err',
'ML_pred_3d_err'], [newData, newData, newData, newData, newData,
newData, newData, newData],
dtypes='>f4',
usemask=False)
return data
def splitData(data, test_size=0.3):
def splitList(alist, wanted_parts=1):
''' Breaks a list into a number of parts. If it does not divide evenly
then the last list will have an extra element.
'''
length = len(alist)
return [alist[i*length // wanted_parts: (i+1)*length // wanted_parts]\
for i in range(wanted_parts)]
np.random.shuffle(data)
sl = splitList(data, int(1 / test_size))
c = permutations(list(range(int(1 / test_size))))
prev_i = -1
for i, j, k in c:
if i == prev_i:
continue
else:
test = sl[i]
train = np.append(sl[j], sl[k])
prev_i = i
#print test
#print train
yield train, test
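# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how the splitData() generator above is consumed: with
# test_size=0.3 it shuffles the array, cuts it into three equal parts and
# yields three (train, test) folds. The toy array below is made up.
def _demo_splitData():
    toy = np.arange(30)
    folds = [(len(train), len(test)) for train, test in splitData(toy, 0.3)]
    return folds  # expected: [(20, 10), (20, 10), (20, 10)]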
def addMasses(data, generator):
''' This does all of the heavy lifting to get the new masses assigned to
the right places.
'''
i = 0
for train, test in generator:
rf = RandomForestRegressor(n_estimators=1000,
min_samples_leaf=1,
verbose=1,
n_jobs=4)
X = np.log10(train['M200c'])
############
#### 1d ####
############
y = np.column_stack([np.log10(train['LOSVD'])])
rf.fit(y, X)
obs = np.column_stack([np.log10(test['LOSVD'])])
mrf = rf.predict(obs)
data['ML_pred_1d'][test['IDX']] = mrf
# errors
print('Calculating Error')
p = multiprocessing.Pool(maxtasksperchild=1000,
initializer=child_initializer,
initargs=([rf]))
result = p.map(mp_worker_wrapper, zip(obs, mrf))
p.close()
p.join()
data['ML_pred_1d_err'][test['IDX']] = result
#############
#### 2d #####
#############
y = np.column_stack([np.log10(train['LOSVD']), train['ZSPEC']])
rf.fit(y, X)
obs = np.column_stack([np.log10(test['LOSVD']), test['ZSPEC']])
mrf = rf.predict(obs)
data['ML_pred_2d'][test['IDX']] = mrf
# errors
print('Calculating Error, 2d')
p = multiprocessing.Pool(maxtasksperchild=1000,
initializer=child_initializer,
initargs=([rf]))
result = p.map(mp_worker_wrapper, zip(obs, mrf))
p.close()
p.join()
data['ML_pred_2d_err'][test['IDX']] = result
#############
#### 2d #####
#############
y = np.column_stack([np.log10(train['LOSVD']), train['NGAL']])
rf.fit(y, X)
obs = np.column_stack([np.log10(test['LOSVD']), test['NGAL']])
mrf = rf.predict(obs)
data['ML_pred_2d2'][test['IDX']] = mrf
# errors
print('Calculating Error, 2d2')
p = multiprocessing.Pool(maxtasksperchild=1000,
initializer=child_initializer,
initargs=([rf]))
result = p.map(mp_worker_wrapper, zip(obs, mrf))
p.close()
p.join()
data['ML_pred_2d2_err'][test['IDX']] = result
##############
##### 3d #####
##############
y = np.column_stack([np.log10(train['LOSVD']), train['ZSPEC'],
train['NGAL']])
rf.fit(y, X)
obs = np.column_stack([np.log10(test['LOSVD']), test['ZSPEC'],
test['NGAL']])
mrf = rf.predict(obs)
data['ML_pred_3d'][test['IDX']] = mrf
# errors
print('Calculating Error, 3d')
p = multiprocessing.Pool(maxtasksperchild=1000,
initializer=child_initializer,
initargs=([rf]))
result = p.map(mp_worker_wrapper, zip(obs, mrf))
p.close()
p.join()
data['ML_pred_3d_err'][test['IDX']] = result
print(i)
i += 1
return data
def pred_ints(model, X, mrf, percentile=68):
''' Calculates the prediction intervals of the estimators. '''
err_down = []
err_up = []
for x in range(len(X)):
preds = []
for pred in model.estimators_:
try:
preds.append(pred.predict(X[x][:, np.newaxis]))
except ValueError:
preds.append(pred.predict(X[x].reshape(1, -1)))
err_down.append(np.percentile(preds, (100 - percentile) / 2.))
err_up.append(np.percentile(preds, 100 - (100 - percentile) / 2.))
return err_down, err_up
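# --- Illustrative sketch (not part of the original module) ---
# A hedged, self-contained example of the per-tree spread that pred_ints()
# above summarises with percentiles: fit a small forest on toy data, collect
# the individual tree predictions for one observation and take a ~68%
# interval. The toy data and forest size are made up.
def _demo_tree_spread():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 2)
    y = X[:, 0] + 0.1 * rng.randn(100)
    forest = RandomForestRegressor(n_estimators=20, random_state=0).fit(X, y)
    obs = X[:1]  # single observation, kept 2-d for predict()
    per_tree = [tree.predict(obs)[0] for tree in forest.estimators_]
    return np.percentile(per_tree, [16, 84])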
#def mp_pred_ints(model, obs, mrf):
def mp_pred_ints(obs, mrf):
preds = []
for pred in model.estimators_:
try:
preds.append(pred.predict(obs[:, np.newaxis]))
except ValueError:
preds.append(pred.predict(obs.reshape(1, -1)))
#err_down = mrf - np.std(preds)
#err_up = mrf + np.std(preds)
# Bessel corrected std
err = np.std(preds, ddof=1)
return err
def mp_worker_wrapper(args):
return mp_pred_ints(*args)
if __name__ == "__main__":
### Targeted ###
################
with hdf.File('./result_targetedRealistic.hdf5', 'r') as f:
dset = f[list(f.keys())[0]]
data = dset['IDX', 'HALOID', 'ZSPEC', 'M200c', 'NGAL', 'LOSVD',
'LOSVD_err', 'MASS']
#data = dset.value
# add the extra fields
data = updateArray(data)
# You have to clean the data here. This is almost certainly from the fact
# that some of the HALOIDS are repeated at different redshifts. I have a
# prior on the LOSVD calculation which will limit the LOSVD to a maximum.
# Because the clusters are so far apart the LOSVD is super high.
mask = ((np.log10(data['LOSVD']) > 3.12) & (data['M200c'] < 10**14.5) |
(data['LOSVD'] < 50))
maskedDataT = data[~mask]
badData = data[mask]
sl_targeted = splitData(maskedDataT, 0.3)
data = addMasses(data, sl_targeted)
with hdf.File('targetedRealistic_MLmasses.hdf5', 'w') as f:
f['predicted masses'] = data
f.flush()
### Survey ###
##############
print('SURVEY!')
with hdf.File('./surveyCompleteRealistic.hdf5', 'r') as f:
dset = f[list(f.keys())[0]]
data = dset['IDX', 'HALOID', 'ZSPEC', 'M200c', 'NGAL', 'LOSVD',
'LOSVD_err', 'MASS']
#data = dset.value
# add the extra fields
data = updateArray(data)
# You have to clean the data here. This is almost certainly from the fact that
# some of the HALOIDS are repeated at different redshifts. I have a prior on
# the LOSVD calculation which will limit the LOSVD to a maximum. Because the
# clusters are so far apart the LOSVD is super high.
mask = ((np.log10(data['LOSVD']) > 3.12) & (data['M200c'] < 10**14.5) |
(data['LOSVD'] < 50))
maskedDataS = data[~mask]
badData = data[mask]
sl_survey = splitData(maskedDataS, 0.3)
data = addMasses(data, sl_survey)
with hdf.File('surveyCompleteRealistic_MLmasses.hdf5', 'w') as f:
f['predicted masses'] = data
f.flush()
| mit |
sonnyhu/scikit-learn | examples/model_selection/grid_search_digits.py | 8 | 2760 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.results_['test_mean_score']
stds = clf.results_['test_std_score']
for i in range(len(clf.results_['params'])):
print("%0.3f (+/-%0.03f) for %r"
% (means[i], stds[i] * 2, clf.results_['params'][i]))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
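# --- Illustrative sketch (not part of the original example) ---
# A hedged follow-up showing how a fitted GridSearchCV object such as `clf`
# above is typically reused: with refit=True (the default) the refitted best
# estimator can score new data directly. The argument names are hypothetical.
def _demo_use_best_estimator(search, X_eval, y_eval):
    print("Best CV score: %0.3f with %r" % (search.best_score_, search.best_params_))
    return search.best_estimator_.score(X_eval, y_eval)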
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/benchmarks/bench_plot_nmf.py | 1 | 5763 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
import gc
from collections import defaultdict
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
W : 2-ndarray, [n_samples, r]
Sample (activation) part of the factorization
H : 2-ndarray, [r, n_features]
Feature (component) part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
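# --- Illustrative sketch (not part of the original benchmark) ---
# A hedged usage example of alt_nnmf() above: factor a small random
# non-negative matrix and return the Frobenius norm of the residual.
# The matrix size, rank and iteration budget are made up.
def _demo_alt_nnmf():
    rng = np.random.RandomState(0)
    V = np.abs(rng.rand(30, 20))
    W, H = alt_nnmf(V, r=5, max_iter=200, tol=1e-3)
    return norm(V - np.dot(W, H))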
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to, since surface plots do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| mit |
linebp/pandas | pandas/core/reshape/merge.py | 1 | 56266 | """
SQL-style merge routines
"""
import copy
import warnings
import string
import numpy as np
from pandas.compat import range, lzip, zip, map, filter
import pandas.compat as compat
from pandas import (Categorical, Series, DataFrame,
Index, MultiIndex, Timedelta)
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_datetime64_dtype,
needs_i8_conversion,
is_int64_dtype,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_integer,
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
is_list_like,
_ensure_int64,
_ensure_float64,
_ensure_object,
_get_dtype)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util._decorators import Appender, Substitution
from pandas.core.sorting import is_int64_overflow_possible
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
from pandas.errors import MergeError
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator,
validate=validate)
return op.get_result()
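# --- Illustrative sketch (not part of the original module) ---
# A hedged usage example of merge() above with hypothetical frames: an inner
# join on a shared key column, keeping the merge indicator column.
def _demo_merge():
    left = DataFrame({'key': ['a', 'b', 'c'], 'lval': [1, 2, 3]})
    right = DataFrame({'key': ['b', 'c', 'd'], 'rval': [4, 5, 6]})
    return merge(left, right, on='key', how='inner', indicator=True)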
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def ordered_merge(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y')):
warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
FutureWarning, stacklevel=2)
return merge_ordered(left, right, on=on,
left_on=left_on, right_on=right_on,
left_by=left_by, right_by=right_by,
fill_method=fill_method, suffixes=suffixes)
def merge_ordered(left, right, on=None,
left_on=None, right_on=None,
left_by=None, right_by=None,
fill_method=None, suffixes=('_x', '_y'),
how='outer'):
"""Perform merge with optional filling/interpolation designed for ordered
data like time series data. Optionally perform group-wise merge (see
examples)
Parameters
----------
left : DataFrame
right : DataFrame
on : label or list
Field names to join on. Must be found in both DataFrames.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_by : column name or list of column names
Group left DataFrame by group columns and merge piece by piece with
right DataFrame
right_by : column name or list of column names
Group right DataFrame by group columns and merge piece by piece with
left DataFrame
fill_method : {'ffill', None}, default None
Interpolation method for data
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
how : {'left', 'right', 'outer', 'inner'}, default 'outer'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
.. versionadded:: 0.19.0
Examples
--------
>>> A >>> B
key lvalue group key rvalue
0 a 1 a 0 b 1
1 c 2 a 1 c 2
2 e 3 a 2 d 3
3 a 1 b
4 c 2 b
5 e 3 b
>>> merge_ordered(A, B, fill_method='ffill', left_by='group')
key lvalue group rvalue
0 a 1 a NaN
1 b 1 a 1
2 c 2 a 2
3 d 2 a 3
4 e 3 a 3
5 f 3 a 4
6 a 1 b NaN
7 b 1 b 1
8 c 2 b 2
9 d 2 b 3
10 e 3 b 3
11 f 3 b 4
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge
merge_asof
"""
def _merger(x, y):
# perform the ordered merge operation
op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
suffixes=suffixes, fill_method=fill_method,
how=how)
return op.get_result()
if left_by is not None and right_by is not None:
raise ValueError('Can only group either left or right frames')
elif left_by is not None:
result, _ = _groupby_and_merge(left_by, on, left, right,
lambda x, y: _merger(x, y),
check_duplicates=False)
elif right_by is not None:
result, _ = _groupby_and_merge(right_by, on, right, left,
lambda x, y: _merger(y, x),
check_duplicates=False)
else:
result = _merger(left, right)
return result
ordered_merge.__doc__ = merge_ordered.__doc__
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True,
direction='backward'):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
Examples
--------
>>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
... 'right_val': [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
... index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
See also
--------
merge
merge_ordered
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction)
return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
self.axis = axis
self.on = com._maybe_make_list(on)
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
self.copy = copy
self.suffixes = suffixes
self.sort = sort
self.left_index = left_index
self.right_index = right_index
self.indicator = indicator
if isinstance(self.indicator, compat.string_types):
self.indicator_name = self.indicator
elif isinstance(self.indicator, bool):
self.indicator_name = '_merge' if self.indicator else None
else:
raise ValueError(
'indicator option can only accept boolean or string arguments')
if not isinstance(left, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(left)))
if not isinstance(right, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(right)))
if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{0}'.format(type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{0}'.format(type(right_index)))
# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({0} levels on the left, {1} on the right)')
msg = msg.format(left.columns.nlevels, right.columns.nlevels)
warnings.warn(msg, UserWarning)
self._validate_specification()
# note this function has side effects
(self.left_join_keys,
self.right_join_keys,
self.join_names) = self._get_merge_keys()
# validate the merge keys dtypes. We may need to coerce
# to avoid incompat dtypes
self._maybe_coerce_merge_keys()
# If argument passed to validate,
# check if columns specified as unique
# are in fact unique.
if validate is not None:
self._validate(validate)
def get_result(self):
if self.indicator:
self.left, self.right = self._indicator_pre_merge(
self.left, self.right)
join_index, left_indexer, right_indexer = self._get_join_info()
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
lindexers = {1: left_indexer} if left_indexer is not None else {}
rindexers = {1: right_indexer} if right_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
if self.indicator:
result = self._indicator_post_merge(result)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _indicator_pre_merge(self, left, right):
columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {}".format(i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
left = left.copy()
right = right.copy()
left['_left_indicator'] = 1
left['_left_indicator'] = left['_left_indicator'].astype('int8')
right['_right_indicator'] = 2
right['_right_indicator'] = right['_right_indicator'].astype('int8')
return left, right
def _indicator_post_merge(self, result):
result['_left_indicator'] = result['_left_indicator'].fillna(0)
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] +
result['_right_indicator']),
categories=[1, 2, 3])
result[self.indicator_name] = (
result[self.indicator_name]
.cat.rename_categories(['left_only', 'right_only', 'both']))
result = result.drop(labels=['_left_indicator', '_right_indicator'],
axis=1)
return result
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.left[name].dtype):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(result[name].dtype,
self.right[name].dtype):
take_right = self.right[name]._values
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer,
fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer,
fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values
mask = left_indexer == -1
if mask.all():
key_col = rvals
else:
key_col = Index(lvals).where(~mask, rvals)
if name in result:
result[name] = key_col
else:
result.insert(i, name or 'key_%d' % i, key_col)
def _get_join_indexers(self):
""" return the join indexers """
return _get_join_indexers(self.left_join_keys,
self.right_join_keys,
sort=self.sort,
how=self.how)
def _get_join_info(self):
left_ax = self.left._data.axes[self.axis]
right_ax = self.right._data.axes[self.axis]
if self.left_index and self.right_index and self.how != 'asof':
join_index, left_indexer, right_indexer = \
left_ax.join(right_ax, how=self.how, return_indexers=True,
sort=self.sort)
elif self.right_index and self.how == 'left':
join_index, left_indexer, right_indexer = \
_left_join_on_index(left_ax, right_ax, self.left_join_keys,
sort=self.sort)
elif self.left_index and self.how == 'right':
join_index, right_indexer, left_indexer = \
_left_join_on_index(right_ax, left_ax, self.right_join_keys,
sort=self.sort)
else:
(left_indexer,
right_indexer) = self._get_join_indexers()
if self.right_index:
if len(self.left) > 0:
join_index = self.left.index.take(left_indexer)
else:
join_index = self.right.index.take(right_indexer)
left_indexer = np.array([-1] * len(join_index))
elif self.left_index:
if len(self.right) > 0:
join_index = self.right.index.take(right_indexer)
else:
join_index = self.left.index.take(left_indexer)
right_indexer = np.array([-1] * len(join_index))
else:
join_index = Index(np.arange(len(left_indexer)))
if len(join_index) == 0:
join_index = join_index.astype(object)
return join_index, left_indexer, right_indexer
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(left)
is_rkey = lambda x: isinstance(
x, (np.ndarray, Series)) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(right[rk]._values)
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(right[rk]._values)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left[lk]._values)
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left[k]._values)
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lab)
for lev, lab in zip(self.right.index.levels,
self.right.index.labels)]
else:
right_keys = [self.right.index.values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right[k]._values)
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lab)
for lev, lab in zip(self.left.index.levels,
self.left.index.labels)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left.drop(left_drop, axis=1)
if right_drop:
self.right = self.right.drop(right_drop, axis=1)
return left_keys, right_keys, join_names
def _maybe_coerce_merge_keys(self):
# we have valid merge keys, but we may have to further
# coerce these if they are originally incompatible types
#
# for example if these are categorical, but are not dtype_equal
# or if we have object and integer dtypes
for lk, rk, name in zip(self.left_join_keys,
self.right_join_keys,
self.join_names):
if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
continue
# if either left or right is a categorical
# then they must match exactly in categories & ordered
if is_categorical_dtype(lk) and is_categorical_dtype(rk):
if lk.is_dtype_equal(rk):
continue
elif is_categorical_dtype(lk) or is_categorical_dtype(rk):
pass
elif is_dtype_equal(lk.dtype, rk.dtype):
continue
# if we are numeric, then allow differing
# kinds to proceed, eg. int64 and int8
# further if we are object, but we infer to
# the same, then proceed
if (is_numeric_dtype(lk) and is_numeric_dtype(rk)):
if lk.dtype.kind == rk.dtype.kind:
continue
# let's infer and see if we are ok
if lib.infer_dtype(lk) == lib.infer_dtype(rk):
continue
# Houston, we have a problem!
# let's coerce to object
if name in self.left.columns:
self.left = self.left.assign(
**{name: self.left[name].astype(object)})
if name in self.right.columns:
self.right = self.right.assign(
**{name: self.right[name].astype(object)})
def _validate_specification(self):
# Hm, any way to make this logic less complicated??
if self.on is None and self.left_on is None and self.right_on is None:
if self.left_index and self.right_index:
self.left_on, self.right_on = (), ()
elif self.left_index:
if self.right_on is None:
raise MergeError('Must pass right_on or right_index=True')
elif self.right_index:
if self.left_on is None:
raise MergeError('Must pass left_on or left_index=True')
else:
# use the common columns
common_cols = self.left.columns.intersection(
self.right.columns)
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
if not common_cols.is_unique:
raise MergeError("Data columns not unique: %s"
% repr(common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
raise MergeError('Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.')
self.left_on = self.right_on = self.on
elif self.left_on is not None:
n = len(self.left_on)
if self.right_index:
if len(self.left_on) != self.right.index.nlevels:
raise ValueError('len(left_on) must equal the number '
'of levels in the index of "right"')
self.right_on = [None] * n
elif self.right_on is not None:
n = len(self.right_on)
if self.left_index:
if len(self.right_on) != self.left.index.nlevels:
raise ValueError('len(right_on) must equal the number '
'of levels in the index of "left"')
self.left_on = [None] * n
if len(self.right_on) != len(self.left_on):
raise ValueError("len(right_on) must equal len(left_on)")
def _validate(self, validate):
# Check uniqueness of each
if self.left_index:
left_unique = self.orig_left.index.is_unique
else:
left_unique = MultiIndex.from_arrays(self.left_join_keys
).is_unique
if self.right_index:
right_unique = self.orig_right.index.is_unique
else:
right_unique = MultiIndex.from_arrays(self.right_join_keys
).is_unique
# Check data integrity
if validate in ["one_to_one", "1:1"]:
if not left_unique and not right_unique:
raise MergeError("Merge keys are not unique in either left"
" or right dataset; not a one-to-one merge")
elif not left_unique:
raise MergeError("Merge keys are not unique in left dataset;"
" not a one-to-one merge")
elif not right_unique:
raise MergeError("Merge keys are not unique in right dataset;"
" not a one-to-one merge")
elif validate in ["one_to_many", "1:m"]:
if not left_unique:
raise MergeError("Merge keys are not unique in left dataset;"
"not a one-to-many merge")
elif validate in ["many_to_one", "m:1"]:
if not right_unique:
raise MergeError("Merge keys are not unique in right dataset;"
" not a many-to-one merge")
elif validate in ['many_to_many', 'm:m']:
pass
else:
raise ValueError("Not a valid argument for validate")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
**kwargs):
"""
Parameters
----------
left_keys: ndarray, Index, Series
right_keys: ndarray, Index, Series
sort: boolean, default False
how: string {'inner', 'outer', 'left', 'right'}, default 'inner'
Returns
-------
tuple of (left_indexer, right_indexer)
indexers into the left_keys, right_keys
"""
from functools import partial
assert len(left_keys) == len(right_keys), \
'left_key and right_keys must be the same length'
# bind `sort` arg. of _factorize_keys
fkeys = partial(_factorize_keys, sort=sort)
# get left & right join labels and num. of levels at each location
llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))
# get flat i8 keys from label lists
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
# `count` is the num. of unique keys
# set(lkey) | set(rkey) == range(count)
lkey, rkey, count = fkeys(lkey, rkey)
# preserve left frame order if how == 'left' and sort == False
kwargs = copy.copy(kwargs)
if how == 'left':
kwargs['sort'] = sort
join_func = _join_functions[how]
return join_func(lkey, rkey, count, **kwargs)
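# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the indexers returned by _get_join_indexers() above:
# for an inner join the two int64 arrays are positions into the left and
# right key arrays such that the indexed keys match pairwise. The key values
# below are made up.
def _demo_join_indexers():
    left_keys = [np.array(['a', 'b', 'c'], dtype=object)]
    right_keys = [np.array(['b', 'c', 'd'], dtype=object)]
    left_indexer, right_indexer = _get_join_indexers(left_keys, right_keys,
                                                     sort=False, how='inner')
    return left_indexer, right_indexer  # e.g. the positions of 'b' and 'c'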
class _OrderedMerge(_MergeOperation):
_merge_type = 'ordered_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False, axis=1,
suffixes=('_x', '_y'), copy=True,
fill_method=None, how='outer'):
self.fill_method = fill_method
_MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
left_index=left_index,
right_index=right_index,
right_on=right_on, axis=axis,
how=how, suffixes=suffixes,
sort=True # factorize sorts
)
def get_result(self):
join_index, left_indexer, right_indexer = self._get_join_info()
# this is a bit kludgy
ldata, rdata = self.left._data, self.right._data
lsuf, rsuf = self.suffixes
llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
rdata.items, rsuf)
if self.fill_method == 'ffill':
left_join_indexer = libjoin.ffill_indexer(left_indexer)
right_join_indexer = libjoin.ffill_indexer(right_indexer)
else:
left_join_indexer = left_indexer
right_join_indexer = right_indexer
lindexers = {
1: left_join_indexer} if left_join_indexer is not None else {}
rindexers = {
1: right_join_indexer} if right_join_indexer is not None else {}
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
concat_axis=0, copy=self.copy)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method=self._merge_type)
self._maybe_add_join_keys(result, left_indexer, right_indexer)
return result
def _asof_function(direction, on_type):
return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None)
def _asof_by_function(direction, on_type, by_type):
return getattr(libjoin, 'asof_join_%s_%s_by_%s' %
(direction, on_type, by_type), None)
_type_casters = {
'int64_t': _ensure_int64,
'double': _ensure_float64,
'object': _ensure_object,
}
_cython_types = {
'uint8': 'uint8_t',
'uint32': 'uint32_t',
'uint16': 'uint16_t',
'uint64': 'uint64_t',
'int8': 'int8_t',
'int32': 'int32_t',
'int16': 'int16_t',
'int64': 'int64_t',
'float16': 'error',
'float32': 'float',
'float64': 'double',
}
def _get_cython_type(dtype):
""" Given a dtype, return a C name like 'int64_t' or 'double' """
type_name = _get_dtype(dtype).name
ctype = _cython_types.get(type_name, 'object')
if ctype == 'error':
raise MergeError('unsupported type: ' + type_name)
return ctype
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
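# --- Illustrative sketch (not part of the original module) ---
# A hedged illustration of the upcast mapping above: integer 'by' keys are
# upcast to int64_t, floats to double and everything else to object.
def _demo_cython_type_upcast():
    return [_get_cython_type_upcast(np.dtype(name))
            for name in ('int8', 'float32', 'object')]  # -> ['int64_t', 'double', 'object']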
class _AsOfMerge(_OrderedMerge):
_merge_type = 'asof_merge'
def __init__(self, left, right, on=None, left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
axis=1, suffixes=('_x', '_y'), copy=True,
fill_method=None,
how='asof', tolerance=None,
allow_exact_matches=True,
direction='backward'):
self.by = by
self.left_by = left_by
self.right_by = right_by
self.tolerance = tolerance
self.allow_exact_matches = allow_exact_matches
self.direction = direction
_OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, axis=axis,
how=how, suffixes=suffixes,
fill_method=fill_method)
def _validate_specification(self):
super(_AsOfMerge, self)._validate_specification()
# we only allow a single key for 'on'
if len(self.left_on) != 1 and not self.left_index:
raise MergeError("can only asof on a key for left")
if len(self.right_on) != 1 and not self.right_index:
raise MergeError("can only asof on a key for right")
if self.left_index and isinstance(self.left.index, MultiIndex):
raise MergeError("left can only have one index")
if self.right_index and isinstance(self.right.index, MultiIndex):
raise MergeError("right can only have one index")
# set 'by' columns
if self.by is not None:
if self.left_by is not None or self.right_by is not None:
raise MergeError('Can only pass by OR left_by '
'and right_by')
self.left_by = self.right_by = self.by
if self.left_by is None and self.right_by is not None:
raise MergeError('missing left_by')
if self.left_by is not None and self.right_by is None:
raise MergeError('missing right_by')
# add 'by' to our key-list so we can have it in the
# output as a key
if self.left_by is not None:
if not is_list_like(self.left_by):
self.left_by = [self.left_by]
if not is_list_like(self.right_by):
self.right_by = [self.right_by]
if len(self.left_by) != len(self.right_by):
raise MergeError('left_by and right_by must be same length')
self.left_on = self.left_by + list(self.left_on)
self.right_on = self.right_by + list(self.right_on)
# check 'direction' is valid
if self.direction not in ['backward', 'forward', 'nearest']:
raise MergeError('direction invalid: ' + self.direction)
@property
def _asof_key(self):
""" This is our asof key, the 'on' """
return self.left_on[-1]
def _get_merge_keys(self):
# note this function has side effects
(left_join_keys,
right_join_keys,
join_names) = super(_AsOfMerge, self)._get_merge_keys()
# validate index types are the same
for lk, rk in zip(left_join_keys, right_join_keys):
if not is_dtype_equal(lk.dtype, rk.dtype):
raise MergeError("incompatible merge keys, "
"must be the same type")
# validate tolerance; must be a Timedelta if we have a DTI
if self.tolerance is not None:
if self.left_index:
lt = self.left.index
else:
lt = left_join_keys[-1]
msg = "incompatible tolerance, must be compat " \
"with type {0}".format(type(lt))
if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
raise MergeError(msg)
if self.tolerance < Timedelta(0):
raise MergeError("tolerance must be positive")
elif is_int64_dtype(lt):
if not is_integer(self.tolerance):
raise MergeError(msg)
if self.tolerance < 0:
raise MergeError("tolerance must be positive")
else:
raise MergeError("key must be integer or timestamp")
# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
raise MergeError("allow_exact_matches must be boolean, "
"passed {0}".format(self.allow_exact_matches))
return left_join_keys, right_join_keys, join_names
def _get_join_indexers(self):
""" return the join indexers """
def flip(xs):
""" unlike np.transpose, this returns an array of tuples """
labels = list(string.ascii_lowercase[:len(xs)])
dtypes = [x.dtype for x in xs]
labeled_dtypes = list(zip(labels, dtypes))
return np.array(lzip(*xs), labeled_dtypes)
# values to compare
left_values = (self.left.index.values if self.left_index else
self.left_join_keys[-1])
right_values = (self.right.index.values if self.right_index else
self.right_join_keys[-1])
tolerance = self.tolerance
# we require sortedness in the join keys
msg = " keys must be sorted"
if not Index(left_values).is_monotonic:
raise ValueError('left' + msg)
if not Index(right_values).is_monotonic:
raise ValueError('right' + msg)
# initial type conversion as needed
if needs_i8_conversion(left_values):
left_values = left_values.view('i8')
right_values = right_values.view('i8')
if tolerance is not None:
tolerance = tolerance.value
# a "by" parameter requires special handling
if self.left_by is not None:
# remove 'on' parameter from values if one existed
if self.left_index and self.right_index:
left_by_values = self.left_join_keys
right_by_values = self.right_join_keys
else:
left_by_values = self.left_join_keys[0:-1]
right_by_values = self.right_join_keys[0:-1]
# get tuple representation of values if more than one
if len(left_by_values) == 1:
left_by_values = left_by_values[0]
right_by_values = right_by_values[0]
else:
left_by_values = flip(left_by_values)
right_by_values = flip(right_by_values)
# upcast 'by' parameter because HashTable is limited
by_type = _get_cython_type_upcast(left_by_values.dtype)
by_type_caster = _type_casters[by_type]
left_by_values = by_type_caster(left_by_values)
right_by_values = by_type_caster(right_by_values)
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
func = _asof_by_function(self.direction, on_type, by_type)
return func(left_values,
right_values,
left_by_values,
right_by_values,
self.allow_exact_matches,
tolerance)
else:
# choose appropriate function by type
on_type = _get_cython_type(left_values.dtype)
func = _asof_function(self.direction, on_type)
return func(left_values,
right_values,
self.allow_exact_matches,
tolerance)
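def _merge_asof_usage_sketch():
    # Hypothetical usage sketch (not part of this module): pd.merge_asof is
    # the public wrapper around _AsOfMerge above. The 'on' keys must be
    # sorted and 'by' restricts matches to rows with equal group values.
    # The column names and values below are illustrative only.
    import pandas as pd  # imported here only for the sketch
    left = pd.DataFrame({'time': [1, 5, 10],
                         'ticker': ['a', 'a', 'b'],
                         'bid': [100, 101, 50]})
    right = pd.DataFrame({'time': [2, 6],
                          'ticker': ['a', 'b'],
                          'ask': [100.5, 50.5]})
    # backward direction: each left row gets the most recent right row with
    # time <= left time and the same ticker (NaN if none exists)
    return pd.merge_asof(left, right, on='time', by='ticker',
                         direction='backward')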
def _get_multiindex_indexer(join_keys, index, sort):
from functools import partial
# bind `sort` argument
fkeys = partial(_factorize_keys, sort=sort)
# left & right join labels and num. of levels at each location
rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
if sort:
rlab = list(map(np.take, rlab, index.labels))
else:
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
rlab = list(map(i8copy, index.labels))
# fix right labels if there were any nulls
for i in range(len(join_keys)):
mask = index.labels[i] == -1
if mask.any():
            # check if there were already any nulls at this location
# if there was, it is factorized to `shape[i] - 1`
a = join_keys[i][llab[i] == shape[i] - 1]
if a.size == 0 or not a[0] != a[0]:
shape[i] += 1
rlab[i][mask] = shape[i] - 1
# get flat i8 join keys
lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
# factorize keys to a dense i8 space
lkey, rkey, count = fkeys(lkey, rkey)
return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)
left_indexer, right_indexer = libjoin.left_outer_join(
_ensure_int64(left_key),
_ensure_int64(right_key),
count, sort=sort)
return left_indexer, right_indexer
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
if len(join_keys) > 1:
if not ((isinstance(right_ax, MultiIndex) and
len(join_keys) == right_ax.nlevels)):
raise AssertionError("If more than one join key is given then "
"'right_ax' must be a MultiIndex and the "
"number of join keys must be the number of "
"levels in right_ax")
left_indexer, right_indexer = \
_get_multiindex_indexer(join_keys, right_ax, sort=sort)
else:
jkey = join_keys[0]
left_indexer, right_indexer = \
_get_single_indexer(jkey, right_ax, sort=sort)
if sort or len(left_ax) != len(left_indexer):
# if asked to sort or there are 1-to-many matches
join_index = left_ax.take(left_indexer)
return join_index, left_indexer, right_indexer
# left frame preserves order & length of its index
return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups)
return left_indexer, right_indexer
_join_functions = {
'inner': libjoin.inner_join,
'left': libjoin.left_outer_join,
'right': _right_outer_join,
'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
lk = lk.values
rk = rk.values
# if we exactly match in categories, allow us to use codes
if (is_categorical_dtype(lk) and
is_categorical_dtype(rk) and
lk.is_dtype_equal(rk)):
return lk.codes, rk.codes, len(lk.categories)
if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
klass = libhashtable.Int64Factorizer
lk = _ensure_int64(com._values_from_object(lk))
rk = _ensure_int64(com._values_from_object(rk))
else:
klass = libhashtable.Factorizer
lk = _ensure_object(lk)
rk = _ensure_object(rk)
rizer = klass(max(len(lk), len(rk)))
llab = rizer.factorize(lk)
rlab = rizer.factorize(rk)
count = rizer.get_count()
if sort:
uniques = rizer.uniques.to_array()
llab, rlab = _sort_labels(uniques, llab, rlab)
# NA group
lmask = llab == -1
lany = lmask.any()
rmask = rlab == -1
rany = rmask.any()
if lany or rany:
if lany:
np.putmask(llab, lmask, count)
if rany:
np.putmask(rlab, rmask, count)
count += 1
return llab, rlab, count
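def _factorize_keys_sketch():
    # Hypothetical illustration (not part of pandas): _factorize_keys maps
    # both inputs through a single factorizer so equal values share a code;
    # with sort=True the codes follow the sorted order of the uniques, and
    # any NA values are grouped into one extra trailing code.
    lk = np.array(['a', 'b', 'a'], dtype=object)
    rk = np.array(['b', 'c'], dtype=object)
    llab, rlab, count = _factorize_keys(lk, rk)
    # expected: llab == [0, 1, 0], rlab == [1, 2], count == 3
    return llab, rlab, count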
def _sort_labels(uniques, left, right):
if not isinstance(uniques, np.ndarray):
# tuplesafe
uniques = Index(uniques).values
l = len(left)
labels = np.concatenate([left, right])
_, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
new_labels = _ensure_int64(new_labels)
new_left, new_right = new_labels[:l], new_labels[l:]
return new_left, new_right
def _get_join_keys(llab, rlab, shape, sort):
# how many levels can be done without overflow
pred = lambda i: not is_int64_overflow_possible(shape[:i])
nlev = next(filter(pred, range(len(shape), 0, -1)))
# get keys for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
lkey = stride * llab[0].astype('i8', subok=False, copy=False)
rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
stride //= shape[i]
lkey += llab[i] * stride
rkey += rlab[i] * stride
if nlev == len(shape): # all done!
return lkey, rkey
# densify current keys to avoid overflow
lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
llab = [lkey] + llab[nlev:]
rlab = [rkey] + rlab[nlev:]
shape = [count] + shape[nlev:]
return _get_join_keys(llab, rlab, shape, sort)
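def _flat_join_key_sketch():
    # Hypothetical illustration (not part of pandas): how _get_join_keys
    # collapses multi-level label arrays into a single int64 key when no
    # overflow is possible, by treating the level sizes as a mixed radix.
    llab = [np.array([0, 1, 2]), np.array([1, 0, 1])]
    shape = [3, 2]                    # factorized size of each level
    stride = shape[1]
    key = llab[0].astype('i8') * stride + llab[1]
    return key                        # array([1, 2, 5])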
def _should_fill(lname, rname):
if (not isinstance(lname, compat.string_types) or
not isinstance(rname, compat.string_types)):
return True
return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| bsd-3-clause |
Winand/pandas | asv_bench/benchmarks/stat_ops.py | 7 | 6106 | from .pandas_vb_common import *
def _set_use_bottleneck_False():
try:
pd.options.compute.use_bottleneck = False
except:
from pandas.core import nanops
nanops._USE_BOTTLENECK = False
class FrameOps(object):
goal_time = 0.2
param_names = ['op', 'use_bottleneck', 'dtype', 'axis']
params = [['mean', 'sum', 'median'],
[True, False],
['float', 'int'],
[0, 1]]
def setup(self, op, use_bottleneck, dtype, axis):
if dtype == 'float':
self.df = DataFrame(np.random.randn(100000, 4))
elif dtype == 'int':
self.df = DataFrame(np.random.randint(1000, size=(100000, 4)))
if not use_bottleneck:
_set_use_bottleneck_False()
self.func = getattr(self.df, op)
def time_op(self, op, use_bottleneck, dtype, axis):
self.func(axis=axis)
class stat_ops_level_frame_sum(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
random.shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])
def time_stat_ops_level_frame_sum(self):
self.df.sum(level=1)
class stat_ops_level_frame_sum_multiple(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
random.shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])
def time_stat_ops_level_frame_sum_multiple(self):
self.df.sum(level=[0, 1])
class stat_ops_level_series_sum(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
random.shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])
def time_stat_ops_level_series_sum(self):
self.df[1].sum(level=1)
class stat_ops_level_series_sum_multiple(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)])
random.shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1])
def time_stat_ops_level_series_sum_multiple(self):
self.df[1].sum(level=[0, 1])
class stat_ops_series_std(object):
goal_time = 0.2
def setup(self):
self.s = Series(np.random.randn(100000), index=np.arange(100000))
self.s[::2] = np.nan
def time_stat_ops_series_std(self):
self.s.std()
class stats_corr_spearman(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(1000, 30))
def time_stats_corr_spearman(self):
self.df.corr(method='spearman')
class stats_rank2d_axis0_average(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(5000, 50))
def time_stats_rank2d_axis0_average(self):
self.df.rank()
class stats_rank2d_axis1_average(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(5000, 50))
def time_stats_rank2d_axis1_average(self):
self.df.rank(1)
class stats_rank_average(object):
goal_time = 0.2
def setup(self):
self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)])
self.s = Series(self.values)
def time_stats_rank_average(self):
self.s.rank()
class stats_rank_average_int(object):
goal_time = 0.2
def setup(self):
self.values = np.random.randint(0, 100000, size=200000)
self.s = Series(self.values)
def time_stats_rank_average_int(self):
self.s.rank()
class stats_rank_pct_average(object):
goal_time = 0.2
def setup(self):
self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)])
self.s = Series(self.values)
def time_stats_rank_pct_average(self):
self.s.rank(pct=True)
class stats_rank_pct_average_old(object):
goal_time = 0.2
def setup(self):
self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)])
self.s = Series(self.values)
def time_stats_rank_pct_average_old(self):
(self.s.rank() / len(self.s))
class stats_rolling_mean(object):
goal_time = 0.2
def setup(self):
self.arr = np.random.randn(100000)
self.win = 100
def time_rolling_mean(self):
rolling_mean(self.arr, self.win)
def time_rolling_median(self):
rolling_median(self.arr, self.win)
def time_rolling_min(self):
rolling_min(self.arr, self.win)
def time_rolling_max(self):
rolling_max(self.arr, self.win)
def time_rolling_sum(self):
rolling_sum(self.arr, self.win)
def time_rolling_std(self):
rolling_std(self.arr, self.win)
def time_rolling_var(self):
rolling_var(self.arr, self.win)
def time_rolling_skew(self):
rolling_skew(self.arr, self.win)
def time_rolling_kurt(self):
rolling_kurt(self.arr, self.win)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e198.py | 2 | 6731 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
e189
* divide dominant appliance power
* mse
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
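def _scaled_cost_numpy_sketch(x, t):
    # Hypothetical numpy analogue of scaled_cost above (not part of the
    # original script), useful for checking the masking logic without
    # building a Theano graph: the cost is the average of the mean squared
    # error above and below THRESHOLD, with an empty mask contributing 0
    # instead of NaN.
    import numpy as np  # this script does not import numpy at module level
    sq_error = (np.asarray(x) - np.asarray(t)) ** 2
    def masked_mean(mask):
        vals = sq_error[mask]
        return 0.0 if vals.size == 0 else vals.mean()
    return (masked_mean(np.asarray(t) > THRESHOLD) +
            masked_mean(np.asarray(t) <= THRESHOLD)) / 2.0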
def exp_a(name):
# global source
# source = RealApplianceSource(
# filename='/data/dk3810/ukdale.h5',
# appliances=[
# ['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television'
# # 'dish washer',
# # ['washer dryer', 'washing machine']
# ],
# max_appliance_powers=[2500] * 5,
# on_power_thresholds=[5] * 5,
# max_input_power=2500,
# min_on_durations=[60, 60, 60, 1800, 1800],
# min_off_durations=[12, 12, 12, 1800, 600],
# window=("2013-06-01", "2014-07-01"),
# seq_length=1520,
# output_one_appliance=False,
# boolean_targets=False,
# train_buildings=[1],
# validation_buildings=[1],
# skip_probability=0.7,
# n_seq_per_batch=25,
# input_padding=1,
# include_diff=False,
# clip_appliance_power=False
# )
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=0.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 10,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None
# 'W': Uniform()
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
ishanic/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
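def _one_class_svm_sketch():
    # Minimal sketch (not part of the original example): fit a OneClassSVM
    # as a density model on synthetic 2-D "presence" points, mirroring how
    # the species coverage features are used below. The nu/gamma values are
    # illustrative only.
    rng = np.random.RandomState(0)
    presence = 0.5 * rng.randn(200, 2)          # positive-only observations
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit(presence)
    grid = np.c_[np.linspace(-2, 2, 5), np.linspace(-2, 2, 5)]
    return clf.decision_function(grid)          # higher = denser region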
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
cogmission/nupic.research | projects/sequence_prediction/reberGrammar/reberSequence_CompareTMvsLSTM.py | 13 | 2320 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.ion()
rcParams.update({'figure.autolayout': True})
def plotResult():
resultTM = np.load('result/reberSequenceTM.npz')
resultLSTM = np.load('result/reberSequenceLSTM.npz')
plt.figure()
plt.hold(True)
plt.subplot(2,2,1)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['correctRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['correctRateAll'],1),'-s',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate (Best Match) (%)')
plt.subplot(2,2,4)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['missRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['missRateAll'],1),'-*',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['fpRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['fpRateAll'],1),'-*',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_CompareTM&LSTMperformance.pdf')
if __name__ == "__main__":
plotResult()
| agpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_Rot_inst/Geneva_Rot_inst_age6/UV1.py | 33 | 7340 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [0, #977
1, #991
2, #1026
5, #1216
91, #1218
6, #1239
7, #1240
8, #1243
9, #1263
10, #1304
11,#1308
12, #1397
13, #1402
14, #1406
16, #1486
17] #1531
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("UV Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('UV_Lines.pdf')
plt.clf()
| gpl-2.0 |
IndraVikas/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
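def _barycenter_weights_sketch():
    # Hypothetical check (not part of the module): the weights returned for
    # each point sum to one, as stated in the docstring above. The neighbor
    # assignment here is random and purely illustrative.
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)                          # 5 points in 3-D
    Z = X[rng.randint(0, 5, (5, 4))]            # 4 "neighbors" per point
    B = barycenter_weights(X, Z)
    return np.allclose(B.sum(axis=1), 1.0)      # True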
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (I - W).T (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
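def _locally_linear_embedding_sketch():
    # Hypothetical usage sketch (not part of the module): embed a small
    # random point cloud with the functional interface. The parameter
    # values are illustrative only.
    rng = np.random.RandomState(0)
    X = rng.rand(50, 5)
    Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2,
                                      eigen_solver='dense')
    return Y.shape, err                         # ((50, 2), squared error)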
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
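def _locally_linear_embedding_estimator_sketch():
    # Hypothetical usage sketch (not part of the module): the estimator API
    # mirrors other sklearn transformers; the parameter values below are
    # illustrative only.
    rng = np.random.RandomState(0)
    X = rng.rand(60, 4)
    lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                                 method='standard')
    X_emb = lle.fit_transform(X)                # shape (60, 2)
    return X_emb, lle.reconstruction_error_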
| bsd-3-clause |
Odingod/mne-python | mne/tests/test_label.py | 3 | 28933 | import os
import os.path as op
import shutil
import glob
import warnings
import sys
import numpy as np
from scipy import sparse
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_connectivity,
read_surface)
from mne.label import Label, _blend_colors
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, slow_test)
from mne.fixes import digitize, in1d, assert_is, assert_is_not
from mne.label import _n_colors
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
from mne.externals.six import string_types
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
# XXX : this was added for backward compat and keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
If it's a string it should the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : int
Number of smoothing iterations.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
ordered in decreasing order depending of the maximum value in the stc.
If no Label is available in an hemisphere, an empty list is returned.
"""
src = stc.subject if src is None else src
if isinstance(src, string_types):
subject = src
else:
subject = stc.subject
if isinstance(src, string_types):
subjects_dir = get_subjects_dir(subjects_dir)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from,
'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from,
'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
labels = []
cnt = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
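        # e is now a binary vertex adjacency matrix with self-connections;
        # multiplying by it below spreads the active vertex set to its
        # mesh neighbors (the smoothing step)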
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
this_labels = None
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
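                # each smoothing iteration grows idx_use to include the
                # mesh neighbors of the currently selected vertices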
for k in range(smooth):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
this_labels.append(label)
this_labels = this_labels[0]
labels.append(this_labels)
return labels
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
if comment:
assert_equal(l0.comment, l1.comment)
if color:
assert_equal(l0.color, l1.color)
for attr in ['hemi', 'subject']:
attr0 = getattr(l0, attr)
attr1 = getattr(l1, attr)
msg = "label.%s: %r != %r" % (attr, attr0, attr1)
assert_equal(attr0, attr1, msg)
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_label_subject():
"""Test label subject name extraction
"""
label = read_label(label_fname)
assert_is(label.subject, None)
assert_true('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert_true(label.subject == 'fsaverage')
assert_true('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition
"""
pos = np.random.rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
assert_equal(len(l0), len(idx0))
l_good = l0.copy()
l_good.subject = 'sample'
l_bad = l1.copy()
l_bad.subject = 'foo'
assert_raises(ValueError, l_good.__add__, l_bad)
assert_raises(TypeError, l_good.__add__, 'foo')
assert_raises(ValueError, l_good.__sub__, l_bad)
assert_raises(TypeError, l_good.__sub__, 'foo')
# adding non-overlapping labels
l01 = l0 + l1
assert_equal(len(l01), len(l0) + len(l1))
assert_array_equal(l01.values[:len(l0)], l0.values)
assert_equal(l01.color, l0.color)
# subtraction
assert_labels_equal(l01 - l0, l1, comment=False, color=False)
assert_labels_equal(l01 - l1, l0, comment=False, color=False)
    # adding overlapping labels
l = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l.vertices == 6)[0][0]
assert_equal(l.values[i], l0.values[i0] + l2.values[i2])
assert_equal(l.values[0], l0.values[0])
assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
assert_equal(l.color, _blend_colors(l0.color, l2.color))
# adding lh and rh
l2.hemi = 'rh'
# this now has deprecated behavior
bhl = l0 + l2
assert_equal(bhl.hemi, 'both')
assert_equal(len(bhl), len(l0) + len(l2))
assert_equal(bhl.color, l.color)
assert_true('BiHemiLabel' in repr(bhl))
# subtraction
assert_labels_equal(bhl - l0, l2)
assert_labels_equal(bhl - l2, l0)
bhl2 = l1 + bhl
assert_labels_equal(bhl2.lh, l01)
assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices) # rh label
assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
assert_raises(TypeError, bhl.__add__, 5)
# subtraction
bhl_ = bhl2 - l1
assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
assert_labels_equal(bhl_.rh, bhl.rh)
assert_labels_equal(bhl2 - l2, l0 + l1)
assert_labels_equal(bhl2 - l1 - l0, l2)
bhl_ = bhl2 - bhl2
assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
def test_label_in_src():
"""Test label in src"""
src = read_source_spaces(src_fname)
label = read_label(v1_label_fname)
# construct label from source space vertices
vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
where = in1d(label.vertices, vert_in_src)
pos_in_src = label.pos[where]
values_in_src = label.values[where]
label_src = Label(vert_in_src, pos_in_src, values_in_src,
hemi='lh').fill(src)
# check label vertices
vertices_status = in1d(src[0]['nearest'], label.vertices)
vertices_in = np.nonzero(vertices_status)[0]
vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
assert_array_equal(label_src.vertices, vertices_in)
assert_array_equal(in1d(vertices_out, label_src.vertices), False)
# check values
value_idx = digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
assert_array_equal(label_src.values, values_in_src[value_idx])
# test exception
vertices = np.append([-1], vert_in_src)
assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files
"""
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert_true(len(stc_label.times) == stc_label.data.shape[1])
assert_true(len(stc_label.vertices[0]) == stc_label.data.shape[0])
@testing.requires_testing_data
def test_label_io():
"""Test IO of label files
"""
tempdir = _TempDir()
label = read_label(label_fname)
# label attributes
assert_equal(label.name, 'test-lh')
assert_is(label.subject, None)
assert_is(label.color, None)
# save and reload
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Make sure two sets of labels are equal"""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert_true(label_a.name == label_b.name)
assert_true(label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
"""Test I/O from and to *.annot files"""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for l1, l in zip(parc1, parc):
assert_labels_equal(l1, l)
# test saving only one hemisphere
parc = [l for l in labels if l.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert_true(os.path.isfile(annot_fname % 'l'))
assert_false(os.path.isfile(annot_fname % 'r'))
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [l for l in parc if l.name.endswith('lh')]
for l1, l in zip(parc1, parc_lh):
assert_labels_equal(l1, l)
@testing.requires_testing_data
def test_read_labels_from_annot():
"""Test reading labels from FreeSurfer parcellation
"""
# test some invalid inputs
assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
subjects_dir=subjects_dir)
assert_raises(ValueError, read_labels_from_annot, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
# read labels using hemi specification
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
for label in labels_lh:
assert_true(label.name.endswith('-lh'))
assert_true(label.hemi == 'lh')
# XXX fails on 2.6 for some reason...
if sys.version_info[:2] > (2, 6):
assert_is_not(label.color, None)
# read labels using annot_fname
annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
subjects_dir=subjects_dir)
for label in labels_rh:
assert_true(label.name.endswith('-rh'))
assert_true(label.hemi == 'rh')
assert_is_not(label.color, None)
# combine the lh, rh, labels and sort them
labels_lhrh = list()
labels_lhrh.extend(labels_lh)
labels_lhrh.extend(labels_rh)
names = [label.name for label in labels_lhrh]
labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
# read all labels at once
labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result
_assert_labels_equal(labels_lhrh, labels_both)
# aparc has 68 cortical labels
assert_true(len(labels_both) == 68)
# test regexp
label = read_labels_from_annot('sample', parc='aparc.a2009s',
regexp='Angu', subjects_dir=subjects_dir)[0]
assert_true(label.name == 'G_pariet_inf-Angular-lh')
# silly, but real regexp:
label = read_labels_from_annot('sample', 'aparc.a2009s',
regexp='.*-.{4,}_.{3,3}-L',
subjects_dir=subjects_dir)[0]
assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
assert_raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
annot_fname=annot_fname, regexp='JackTheRipper',
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_labels_from_annot_annot2labels():
"""Test reading labels from parc. by comparing with mne_annot2labels
"""
label_fnames = glob.glob(label_dir + '/*.label')
label_fnames.sort()
labels_mne = [read_label(fname) for fname in label_fnames]
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result, mne does not fill pos, so ignore it
_assert_labels_equal(labels, labels_mne, ignore_pos=True)
@testing.requires_testing_data
def test_write_labels_to_annot():
"""Test writing FreeSurfer parcellation from labels"""
tempdir = _TempDir()
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# create temporary subjects-dir skeleton
surf_dir = op.join(subjects_dir, 'sample', 'surf')
temp_surf_dir = op.join(tempdir, 'sample', 'surf')
os.makedirs(temp_surf_dir)
shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
os.makedirs(op.join(tempdir, 'sample', 'label'))
# test automatic filenames
dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test1')))
assert_true(op.exists(dst % ('rh', 'test1')))
# lh only
for label in labels:
if label.hemi == 'lh':
break
write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test2')))
assert_true(op.exists(dst % ('rh', 'test2')))
# rh only
for label in labels:
if label.hemi == 'rh':
break
write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test3')))
assert_true(op.exists(dst % ('rh', 'test3')))
# label alone
assert_raises(TypeError, write_labels_to_annot, labels[0], 'sample',
'test4', subjects_dir=tempdir)
# write left and right hemi labels with filenames:
fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]
for fname in fnames:
write_labels_to_annot(labels, annot_fname=fname)
# read it back
labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels2.extend(labels22)
names = [label.name for label in labels2]
for label in labels:
idx = names.index(label.name)
assert_labels_equal(label, labels2[idx])
# same with label-internal colors
for fname in fnames:
write_labels_to_annot(labels, annot_fname=fname, overwrite=True)
labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels3.extend(labels33)
names3 = [label.name for label in labels3]
for label in labels:
idx = names3.index(label.name)
assert_labels_equal(label, labels3[idx])
# make sure we can't overwrite things
assert_raises(ValueError, write_labels_to_annot, labels,
annot_fname=fnames[0])
# however, this works
write_labels_to_annot(labels, annot_fname=fnames[0], overwrite=True)
# label without color
labels_ = labels[:]
labels_[0] = labels_[0].copy()
labels_[0].color = None
write_labels_to_annot(labels_, annot_fname=fnames[0], overwrite=True)
# duplicate color
labels_[0].color = labels_[2].color
assert_raises(ValueError, write_labels_to_annot, labels_,
annot_fname=fnames[0], overwrite=True)
# invalid color inputs
labels_[0].color = (1.1, 1., 1., 1.)
assert_raises(ValueError, write_labels_to_annot, labels_,
annot_fname=fnames[0], overwrite=True)
# overlapping labels
labels_ = labels[:]
cuneus_lh = labels[6]
precuneus_lh = labels[50]
labels_.append(precuneus_lh + cuneus_lh)
assert_raises(ValueError, write_labels_to_annot, labels_,
annot_fname=fnames[0], overwrite=True)
# unlabeled vertices
labels_lh = [label for label in labels if label.name.endswith('lh')]
write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
subjects_dir=subjects_dir)
assert_equal(len(labels_lh), len(labels_reloaded))
label0 = labels_lh[0]
label1 = labels_reloaded[-1]
assert_equal(label1.name, "unknown-lh")
assert_true(np.all(in1d(label0.vertices, label1.vertices)))
@testing.requires_testing_data
def test_split_label():
"""Test splitting labels"""
aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
regexp='lingual', subjects_dir=subjects_dir)
lingual = aparc[0]
# split with names
parts = ('lingual_post', 'lingual_ant')
post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
# check output names
assert_equal(post.name, parts[0])
assert_equal(ant.name, parts[1])
# check vertices add up
lingual_reconst = post + ant
lingual_reconst.name = lingual.name
lingual_reconst.comment = lingual.comment
lingual_reconst.color = lingual.color
assert_labels_equal(lingual_reconst, lingual)
# compare output of Label.split() method
post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
assert_labels_equal(post1, post)
assert_labels_equal(ant1, ant)
# compare fs_like split with freesurfer split
antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
assert_array_equal(antmost.vertices, fs_vert)
# check default label name
assert_equal(antmost.name, "lingual_div40-lh")
@slow_test
@testing.requires_testing_data
@requires_sklearn
def test_stc_to_label():
"""Test stc_to_label
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src = read_source_spaces(fwd_fname)
src_bad = read_source_spaces(src_bad_fname)
stc = read_source_estimate(stc_fname, 'sample')
os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
labels1 = _stc_to_label(stc, src='sample', smooth=3)
labels2 = _stc_to_label(stc, src=src, smooth=3)
assert_equal(len(labels1), len(labels2))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
connected=True)
assert_true(len(w) > 0)
assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
connected=True)
assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
connected=True)
assert_equal(len(labels_lh), 1)
assert_equal(len(labels_rh), 1)
# test getting tris
tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
assert_raises(ValueError, spatial_tris_connectivity, tris,
remap_vertices=False)
connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
assert_true(connectivity.shape[0] == len(stc.vertices[0]))
# "src" as a subject name
assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
connected=False, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
smooth=False, connected=False, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
connected=True, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
connected=False, subjects_dir=subjects_dir)
labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
connected=False,
subjects_dir=subjects_dir)
assert_true(len(labels_lh) > 1)
assert_true(len(labels_rh) > 1)
# with smooth='patch'
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_patch = stc_to_label(stc, src=src, smooth=True)
assert_equal(len(w), 1)
assert_equal(len(labels_patch), len(labels1))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
@slow_test
@testing.requires_testing_data
def test_morph():
"""Test inter-subject label morphing
"""
label_orig = read_label(real_label_fname)
label_orig.subject = 'sample'
# should work for specifying vertices for both hemis, or just the
# hemi of the given label
vals = list()
for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
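        # morph the label from sample to fsaverage and back for several
        # equivalent `grade` specifications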
label = label_orig.copy()
# this should throw an error because the label has all zero values
assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
label.values.fill(1)
label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1,
copy=False)
label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2,
copy=False)
assert_true(np.mean(in1d(label_orig.vertices, label.vertices)) == 1.0)
assert_true(len(label.vertices) < 3 * len(label_orig.vertices))
vals.append(label.vertices)
assert_array_equal(vals[0], vals[1])
# make sure label smoothing can run
assert_equal(label.subject, 'sample')
verts = [np.arange(10242), np.arange(10242)]
for hemi in ['lh', 'rh']:
label.hemi = hemi
label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
assert_raises(TypeError, label.morph, None, 1, 5, verts,
subjects_dir, 2)
assert_raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
subjects_dir, 2)
label.smooth(subjects_dir=subjects_dir) # make sure this runs
@testing.requires_testing_data
def test_grow_labels():
"""Test generation of circular source labels"""
seeds = [0, 50000]
# these were chosen manually in mne_analyze
should_be_in = [[49, 227], [51207, 48794]]
hemis = [0, 1]
names = ['aneurism', 'tumor']
labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
tgt_names = ['aneurism-lh', 'tumor-rh']
tgt_hemis = ['lh', 'rh']
for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
should_be_in, tgt_names):
assert_true(np.any(label.vertices == seed))
assert_true(np.all(in1d(sh, label.vertices)))
assert_equal(label.hemi, hemi)
assert_equal(label.name, name)
# grow labels with and without overlap
seeds = [57532, [58887, 6304]]
l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
seeds = [57532, [58887, 6304]]
l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False)
# test label naming
assert_equal(l01.name, 'Label_0-lh')
assert_equal(l02.name, 'Label_1-lh')
assert_equal(l11.name, 'Label_0-lh')
assert_equal(l12.name, 'Label_1-lh')
# make sure set 1 does not overlap
overlap = np.intersect1d(l11.vertices, l12.vertices, True)
assert_array_equal(overlap, [])
# make sure both sets cover the same vertices
l0 = l01 + l02
l1 = l11 + l12
assert_array_equal(l1.vertices, l0.vertices)
run_tests_if_main()
| bsd-3-clause |
jbloomlab/dms_tools2 | dms_tools2/utils.py | 1 | 59560 | """
===================
utils
===================
Miscellaneous utilities for ``dms_tools2``.
"""
import os
import math
import sys
import time
import platform
import importlib
import logging
import tempfile
import textwrap
import itertools
import collections
import random
import re
import pysam
import numpy
import scipy.misc
import scipy.special
import pandas
import gzip
import dms_tools2
from dms_tools2 import CODONS, CODON_TO_AA, AAS_WITHSTOP, AA_TO_CODONS, NTS
import dms_tools2._cutils
def sessionInfo():
"""Returns string with information about session / packages."""
s = [
'Version information:',
'\tTime and date: {0}'.format(time.asctime()),
'\tPlatform: {0}'.format(platform.platform()),
'\tPython version: {0}'.format(
sys.version.replace('\n', ' ')),
'\tdms_tools2 version: {0}'.format(dms_tools2.__version__),
]
for modname in ['Bio', 'pandas', 'numpy', 'IPython',
'jupyter', 'matplotlib', 'plotnine', 'natsort', 'pystan',
'scipy', 'seaborn', 'phydmslib', 'statsmodels', 'rpy2',
'regex', 'umi_tools']:
try:
v = importlib.import_module(modname).__version__
s.append('\t{0} version: {1}'.format(modname, v))
except AttributeError:
s.append('\t{0} version unknown'.format(modname))
except ImportError:
s.append("\t{0} cannot be imported".format(modname))
return '\n'.join(s)
def initLogger(logfile, prog, args):
"""Initialize output logging for scripts.
Args:
`logfile` (str or `sys.stdout`)
Name of file to which log is written, or
`sys.stdout` if you just want to write information
to standard output.
`prog` (str)
Name of program for which we are logging.
`args` (dict)
Program arguments as arg / value pairs.
Returns:
If `logfile` is a string giving a file name, returns
an opened and initialized `logging.Logger`. If `logfile`
is `sys.stdout`, then writes information to `sys.stdout`.
In either case, basic information is written about the program
and args.
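
    Example (a sketch; the file and program names here are made up)::

        logger = initLogger('my.log', 'myprog', {'input': 'in.csv'})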
"""
if logfile == sys.stdout:
logfile.write("Beginning execution of {0} in directory {1}\n\n".format(
prog, os.getcwd()))
logfile.write("{0}\n\n".format(sessionInfo()))
logfile.write("Parsed the following arguments:\n\t{0}\n\n".format(
'\n\t'.join(['{0} = {1}'.format(arg, val) for (arg, val)
in args.items()])))
else:
if os.path.isfile(logfile):
os.remove(logfile)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(prog)
logfile_handler = logging.FileHandler(logfile)
logger.addHandler(logfile_handler)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
logfile_handler.setFormatter(formatter)
try:
logger.info("Beginning execution of {0} in directory {1}\n"
.format(prog, os.getcwd()))
logger.info("Progress is being logged to {0}".format(logfile))
logger.info("{0}\n".format(sessionInfo()))
logger.info("Parsed the following arguments:\n\t{0}\n".format(
'\n\t'.join(['{0} = {1}'.format(arg, val) for (arg, val)
in args.items()])))
except:
logger.exception("Error")
raise
return logger
def iteratePairedFASTQ(r1files, r2files, r1trim=None, r2trim=None):
"""Iterates over FASTQ files for single or paired-end sequencing.
Args:
`r1files` (list or str)
Name of R1 FASTQ file or list of such files. Can optionally
be gzipped.
`r2files` (list or str or `None`)
Like `r1files` but for R2 files, or `None` if no R2.
`r1trim` (int or `None`)
If not `None`, trim `r1` and `q1` to be no longer than this.
`r2trim` (int or `None`)
Like `r1trim` but for R2.
Returns:
Each iteration returns `(name, r1, r2, q1, q2, fail)` where:
- `name` is a string giving the read name
- `r1` and `r2` are strings giving the reads; `r2`
is `None` if no R2.
- `q1` and `q2` are strings giving the PHRED Q scores;
`q2` is none if no R2.
- `fail` is `True` if either read failed Illumina chastity
filter, `False` if both passed, `None` if info not present.
We run a simple test by first writing an example FASTQ file and
then testing on it.
>>> n1_1 = '@DH1DQQN1:933:HMLH5BCXY:1:1101:2165:1984 1:N:0:CGATGT'
>>> r1_1 = 'ATGCAATTG'
>>> q1_1 = 'GGGGGIIII'
>>> n2_1 = '@DH1DQQN1:933:HMLH5BCXY:1:1101:2165:1984 2:N:0:CGATGT'
>>> r2_1 = 'CATGCATA'
>>> q2_1 = 'G<GGGIII'
>>> tf = tempfile.NamedTemporaryFile
>>> with tf(mode='w') as r1file, tf(mode='w') as r2file:
... _ = r1file.write('\\n'.join([
... n1_1, r1_1, '+', q1_1,
... n1_1.replace(':N:', ':Y:'), r1_1, '+', q1_1,
... n1_1.split()[0], r1_1, '+', q1_1,
... ]))
... r1file.flush()
... _ = r2file.write('\\n'.join([
... n2_1, r2_1, '+', q2_1,
... n2_1, r2_1, '+', q2_1,
... n2_1, r2_1, '+', q2_1,
... ]))
... r2file.flush()
... itr = iteratePairedFASTQ(r1file.name, r2file.name, r1trim=4, r2trim=5)
... next(itr) == (n1_1.split()[0][1 : ], r1_1[ : 4],
... r2_1[ : 5], q1_1[ : 4], q2_1[ : 5], False)
... next(itr) == (n1_1.split()[0][1 : ], r1_1[ : 4],
... r2_1[ : 5], q1_1[ : 4], q2_1[ : 5], True)
... next(itr) == (n1_1.split()[0][1 : ], r1_1[ : 4],
... r2_1[ : 5], q1_1[ : 4], q2_1[ : 5], None)
True
True
True
Now do the same test but for just R1:
>>> with tf(mode='w') as r1file:
... _ = r1file.write('\\n'.join([
... n1_1, r1_1, '+', q1_1,
... n1_1.replace(':N:', ':Y:'), r1_1, '+', q1_1,
... n1_1.split()[0], r1_1, '+', q1_1,
... ]))
... r1file.flush()
... itr_R1 = iteratePairedFASTQ(r1file.name, None, r1trim=4)
... next(itr_R1) == (n1_1.split()[0][1 : ], r1_1[ : 4],
... None, q1_1[ : 4], None, False)
... next(itr_R1) == (n1_1.split()[0][1 : ], r1_1[ : 4],
... None, q1_1[ : 4], None, True)
... next(itr_R1) == (n1_1.split()[0][1 : ], r1_1[ : 4],
... None, q1_1[ : 4], None, None)
True
True
True
"""
if isinstance(r1files, str):
r1files = [r1files]
if r2files is not None:
r2files = [r2files]
if not all(map(os.path.isfile, r1files)):
raise ValueError('cannot find all `r1files`')
if r2files is None:
r2files = [None] * len(r1files)
elif len(r1files) != len(r2files):
raise ValueError('`r1files` and `r2files` differ in length')
elif not all(map(os.path.isfile, r2files)):
raise ValueError('cannot find all `r2files`')
for (r1file, r2file) in zip(r1files, r2files):
r1reader = pysam.FastxFile(r1file)
if r2file is None:
read_iterator = r1reader
else:
r2reader = pysam.FastxFile(r2file)
read_iterator = zip(r1reader, r2reader)
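        # each record yielded by pysam.FastxFile exposes the name, comment,
        # sequence, and quality attributes unpacked below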
for tup in read_iterator:
if r2file is None:
a1 = tup
r2 = q2 = None
else:
a1, a2 = tup
r2 = a2.sequence
q2 = a2.quality
if a2.comment is not None:
id2 = f"{a2.name} {a2.comment}".split()
else:
id2 = a2.name.split()
name2 = id2[0]
r1 = a1.sequence
q1 = a1.quality
if a1.comment is not None:
id1 = f"{a1.name} {a1.comment}".split()
else:
id1 = a1.name.split()
name1 = id1[0]
if r2file is not None:
                # trim the trailing '.1' / '.2', needed for SRA-downloaded files
if name1[-2 : ] == '.1' and name2[-2 : ] == '.2':
name1 = name1[ : -2]
name2 = name2[ : -2]
if name1 != name2:
raise ValueError(f"name mismatch {name1} vs {name2}")
# parse chastity filter assuming CASAVA 1.8 header
try:
f1 = id1[1][2]
if r2file is None:
f2 = 'N'
else:
f2 = id2[1][2]
if f1 == 'N' and f2 == 'N':
fail = False
elif f1 in ['N', 'Y'] and f2 in ['N', 'Y']:
                    fail = True
                else:
                    fail = None  # unexpected chastity flag; treat as unknown
except IndexError:
fail = None # header does not specify chastity filter
if r1trim is not None:
r1 = r1[ : r1trim]
q1 = q1[ : r1trim]
if (r2trim is not None) and (r2file is not None):
r2 = r2[ : r2trim]
q2 = q2[ : r2trim]
yield (name1, r1, r2, q1, q2, fail)
def lowQtoN(r, q, minq, use_cutils=True):
"""Replaces low quality nucleotides with ``N`` characters.
Args:
`r` (str)
A string representing a sequencing read.
`q` (str)
String of same length as `r` holding Q scores
in Sanger ASCII encoding.
`minq` (length-one string)
Replace all positions in `r` where `q` is < this.
`use_cutils` (bool)
Use the faster implementation in the `_cutils` module.
Returns:
A version of `r` where all positions `i` where
`q[i] < minq` have been replaced with ``N``.
>>> r = 'ATGCAT'
>>> q = 'GB<.0+'
>>> minq = '0'
>>> lowQtoN(r, q, minq) == 'ATGNAN'
True
"""
if use_cutils:
return dms_tools2._cutils.lowQtoN(r, q, minq)
assert len(r) == len(q)
return ''.join([ri if qi >= minq else 'N'
for (ri, qi) in zip(r, q)])
def buildReadConsensus(reads, minreads, minconcur, use_cutils=True):
"""Builds consensus sequence of some reads.
You may want to pre-fill low-quality sites with ``N``
using `lowQtoN`. An ``N`` is considered a non-called identity.
Args:
`reads` (list)
List of reads as strings. If reads are not all same
length, shorter ones are extended from 3' end with ``N``
to match maximal length.
`minreads` (int)
Only call consensus at a site if at least this many reads
have called identity.
`minconcur` (float)
Only call consensus at site if >= this fraction of called
identities agree.
`use_cutils` (bool)
Use the faster implementation in the `_cutils` module.
Returns:
A string giving the consensus sequence. Non-called
        sites are returned as ``N``.
>>> reads = ['ATGCAT',
... 'NTGNANA',
... 'ACGNNTAT',
... 'NTGNTA']
>>> buildReadConsensus(reads, 2, 0.75) == 'ATGNNNAN'
True
>>> reads.append('CTGCATAT')
>>> buildReadConsensus(reads, 2, 0.75) == 'NTGCATAT'
True
"""
if use_cutils:
return dms_tools2._cutils.buildReadConsensus(reads,
minreads, minconcur)
readlens = list(map(len, reads))
maxlen = max(readlens)
consensus = []
for i in range(maxlen):
counts = {}
for (r, lenr) in zip(reads, readlens):
if lenr > i:
x = r[i]
if x != 'N':
if x in counts:
counts[x] += 1
else:
counts[x] = 1
ntot = sum(counts.values())
if ntot < minreads:
consensus.append('N')
else:
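            # break count ties deterministically by taking the
            # lexicographically largest character among the most common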
(nmax, xmax) = sorted([(n, x) for (x, n) in counts.items()])[-1]
if nmax / float(ntot) >= minconcur:
consensus.append(xmax)
else:
consensus.append('N')
return ''.join(consensus)
def rarefactionCurve(barcodes, *, maxpoints=1e5, logspace=True):
"""Rarefaction curve from list of barcodes.
Uses the analytical formula for the rarefaction curve defined
`on Wikipedia <https://en.wikipedia.org/wiki/Rarefaction_(ecology)#Derivation>`_.
Args:
`barcodes` (list or pandas Series)
Holds the list of unique barcodes for which we calculate
the rarefaction curve. It is expected that some of these
barcodes will be repeated multiple times in the list if
the sampling is approaching saturation.
`maxpoints` (int)
Only calculate values at this many points. The benefit
of this is that it can become very costly to calculate
the curve at every point when there are many points.
`logspace` (True)
Logarithmically space the `maxpoints` points for
the calculation. This will give better results if
we are subsampling and the curve saturates. Only
done if we have to subsample.
Returns:
The 2-tuple `(nreads, nbarcodes)`, where both `nreads` and
`nbarcodes` are lists of the same length, and `nbarcodes[i]`
is the expected number of barcodes observed when there are
`nreads[i]` reads.
Here we take a very small list and show that the results given
by the function are equivalent to those obtained by random
subsampling:
>>> barcodes = ['A', 'A', 'A', 'A', 'G', 'G', 'C', 'T']
>>> (nreads, nbarcodes) = rarefactionCurve(barcodes)
>>> random.seed(1)
>>> nrand = 100000
>>> sim_equal_calc = []
>>> for n in range(1, len(barcodes) + 1):
... nbarcodes_sim = sum([len(set(random.sample(barcodes, n)))
... for _ in range(nrand)]) / nrand
... sim_equal_calc.append(numpy.allclose(nbarcodes_sim,
... nbarcodes[nreads.index(n)], atol=1e-2))
>>> all(sim_equal_calc)
True
"""
N = len(barcodes) # total number of items
Ni = collections.Counter(barcodes)
K = len(Ni)
Mj = collections.Counter(Ni.values())
Nk, num = map(numpy.array, zip(*Mj.items()))
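    # Nk[j] is a distinct barcode multiplicity; num[j] is the number of
    # barcodes observed exactly Nk[j] times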
# use simplification that (N - Ni)Cr(n) / (N)Cr(n) =
# [(N - Ni)! * (N - n)!] / [N! * (N - Ni - n)!]
#
# Also use fact that gamma(x + 1) = x!
nbarcodes = []
lnFactorial_N = scipy.special.gammaln(N + 1)
if logspace and N > maxpoints:
nreads = list(numpy.unique(numpy.logspace(
math.log10(1), math.log10(N),
num=int(min(N, maxpoints))).astype('int')))
else:
nreads = list(numpy.unique(numpy.linspace(
1, N, num=min(N, maxpoints)).astype('int')))
for n in nreads:
lnFactorial_N_minus_n = scipy.special.gammaln(N - n + 1)
i = numpy.nonzero(N - Nk - n >= 0) # indices where this is true
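        # multiplicities with N - Nk < n are certain to be sampled, so they
        # contribute zero probability of being missed and are excluded here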
nbarcodes.append(
K - (num[i] * numpy.exp(
scipy.special.gammaln(N - Nk[i] + 1) +
lnFactorial_N_minus_n -
lnFactorial_N -
scipy.special.gammaln(N - Nk[i] - n + 1))
).sum()
)
return (nreads, nbarcodes)
def reverseComplement(s, use_cutils=True):
"""Gets reverse complement of DNA sequence `s`.
Args:
`s` (str)
Sequence to reverse complement.
`use_cutils` (bool)
Use the faster implementation in the `_cutils` module.
Returns:
Reverse complement of `s` as a str.
>>> s = 'ATGCAAN'
>>> reverseComplement(s) == 'NTTGCAT'
True
"""
if use_cutils:
return dms_tools2._cutils.reverseComplement(s)
return ''.join(reversed([dms_tools2.NTCOMPLEMENT[nt] for nt in s]))
def alignSubamplicon(refseq, r1, r2, refseqstart, refseqend, maxmuts,
maxN, chartype, use_cutils=True):
"""Try to align subamplicon to reference sequence at defined location.
Tries to align reads `r1` and `r2` to `refseq` at location
specified by `refseqstart` and `refseqend`. Determines how many
    sites of type `chartype` have mutations, and considers the subamplicon
    aligned if this is <= `maxmuts` and the number of ambiguous
    nucleotides is <= `maxN`.
In `r1` and `r2`, an ``N`` indicates a non-called ambiguous identity.
If the reads disagree in a region of overlap that is set to ``N`` in
the final subamplicon, but if one read has ``N`` and the other a called
identity, then the called identity is used in the final subamplicon.
Args:
`refseq` (str)
            Sequence to which we align. If `chartype` is 'codon',
            must be a valid coding sequence (length a multiple of 3).
`r1` (str)
The forward sequence to align.
`r2` (str)
The reverse sequence to align. When reverse complemented,
should read backwards in `refseq`.
`refseqstart` (int)
The nucleotide in `refseq` (1, 2, ... numbering) where the
first nucleotide in `r1` aligns.
`refseqend` (int)
The nucleotide in `refseq` (1, 2, ... numbering) where the
first nucleotide in `r2` aligns (note that `r2` then reads
backwards towards the 5' end of `refseq`).
`maxmuts` (int or float)
Maximum number of mutations of character `chartype` that
are allowed in the aligned subamplicons from the two reads.
`maxN` (int or float)
Maximum number of nucleotides for which we allow
ambiguous (``N``) identities in final subamplicon.
`chartype` (str)
Character type for which we count mutations.
Currently, the only allowable value is 'codon'.
`use_cutils` (bool)
Use the faster implementation in the `_cutils` module.
Returns:
If reads align, return aligned subamplicon as string (of length
        `refseqend - refseqstart + 1`). Otherwise return `False`.
>>> refseq = 'ATGGGGAAA'
>>> s = alignSubamplicon(refseq, 'GGGGAA', 'TTTCCC', 3, 9, 1, 1, 'codon')
>>> s == 'GGGGAAA'
True
>>> s = alignSubamplicon(refseq, 'GGGGAA', 'TTTCCC', 1, 9, 1, 1, 'codon')
>>> s == False
True
>>> s = alignSubamplicon(refseq, 'GGGGAT', 'TTTCCC', 3, 9, 1, 0, 'codon')
>>> s == False
True
>>> s = alignSubamplicon(refseq, 'GGGGAT', 'TTTCCC', 3, 9, 1, 1, 'codon')
>>> s == 'GGGGANA'
True
>>> s = alignSubamplicon(refseq, 'GGGGAT', 'TATCCC', 3, 9, 1, 0, 'codon')
>>> s == 'GGGGATA'
True
>>> s = alignSubamplicon(refseq, 'GGGGAT', 'TATCCC', 3, 9, 0, 0, 'codon')
>>> s == False
True
>>> s = alignSubamplicon(refseq, 'GGGNAA', 'TTTCCC', 3, 9, 0, 0, 'codon')
>>> s == 'GGGGAAA'
True
>>> s = alignSubamplicon(refseq, 'GGGNAA', 'TTNCCC', 3, 9, 0, 0, 'codon')
>>> s == 'GGGGAAA'
True
>>> s = alignSubamplicon(refseq, 'GTTTAA', 'TTTAAA', 3, 9, 1, 0, 'codon')
>>> s == 'GTTTAAA'
True
>>> s = alignSubamplicon(refseq, 'GGGGTA', 'TTACCC', 3, 9, 1, 0, 'codon')
>>> s == 'GGGGTAA'
True
>>> s = alignSubamplicon(refseq, 'GGGCTA', 'TTAGCC', 3, 9, 1, 0, 'codon')
>>> s == False
True
"""
r2 = reverseComplement(r2)
if use_cutils:
return dms_tools2._cutils.alignSubamplicon(refseq, r1, r2,
refseqstart, refseqend, maxmuts, maxN, chartype)
assert chartype in ['codon'], "Invalid chartype"
if chartype == 'codon':
assert len(refseq) % 3 == 0, "refseq length not divisible by 3"
len_subamplicon = refseqend - refseqstart + 1
len_r1 = len(r1)
len_subamplicon_minus_len_r2 = len_subamplicon - len(r2)
subamplicon = []
for i in range(len_subamplicon):
if i < len_subamplicon_minus_len_r2: # site not in r2
if i < len_r1: # site in r1
subamplicon.append(r1[i])
else: # site not in r1
subamplicon.append('N')
else: # site in r2
if i < len_r1: # site in r1
r1i = r1[i]
r2i = r2[i - len_subamplicon_minus_len_r2]
if r1i == r2i:
subamplicon.append(r1i)
elif r1i == 'N':
subamplicon.append(r2i)
elif r2i == 'N':
subamplicon.append(r1i)
else:
subamplicon.append('N')
else: # site not in r1
subamplicon.append(r2[i - len_subamplicon_minus_len_r2])
subamplicon = ''.join(subamplicon)
if subamplicon.count('N') > maxN:
return False
if chartype == 'codon':
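        # find the first complete codon covered by the subamplicon; codonshift
        # is the offset of that codon's first nucleotide in the subamplicon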
if refseqstart % 3 == 1:
startcodon = (refseqstart + 2) // 3
codonshift = 0
elif refseqstart % 3 == 2:
startcodon = (refseqstart + 1) // 3 + 1
codonshift = 2
elif refseqstart % 3 == 0:
startcodon = refseqstart // 3 + 1
codonshift = 1
nmuts = 0
for icodon in range(startcodon, refseqend // 3 + 1):
mutcodon = subamplicon[3 * (icodon - startcodon) + codonshift :
3 * (icodon - startcodon) + 3 + codonshift]
if ('N' not in mutcodon) and (mutcodon !=
refseq[3 * icodon - 3 : 3 * icodon]):
nmuts += 1
if nmuts > maxmuts:
return False
else:
raise ValueError("Invalid chartype")
return subamplicon
def incrementCounts(refseqstart, subamplicon, chartype, counts):
"""Increment counts dict based on an aligned subamplicon.
This is designed for keeping track of counts of different
mutations / identities when aligning many subamplicons to
a sequence.
Any positions where `subamplicon` has an ``N`` are ignored,
and not added to `counts`.
Args:
`refseqstart` (int)
First nucleotide position in 1, 2, ... numbering
where `subamplicon` aligns.
`subamplicon` (str)
The subamplicon.
`chartype` (str)
Character type for which we are counting mutations.
            Currently, the only allowable value is 'codon'.
`counts` (dict)
Stores counts of identities, and is incremented by
this function. Is a dict keyed by every possible
character (e.g., codon), with values lists with
element `i` holding the counts for position `i`
in 0, 1, ... numbering.
Returns:
On completion, `counts` has been incremented.
>>> codonlen = 10
>>> counts = dict([(codon, [0] * codonlen) for codon
... in CODONS])
>>> subamplicon1 = 'ATGGACTTTC'
>>> incrementCounts(1, subamplicon1, 'codon', counts)
>>> subamplicon2 = 'GGTCTTTCCCGGN'
>>> incrementCounts(3, subamplicon2, 'codon', counts)
>>> counts['ATG'][0] == 1
True
>>> counts['GAC'][1] == 1
True
>>> counts['GTC'][1] == 1
True
>>> counts['TTT'][2] == 2
True
>>> counts['CCC'][3] == 1
True
>>> sum([sum(c) for c in counts.values()]) == 6
True
"""
if chartype == 'codon':
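        # find the first complete codon covered by the subamplicon (0-based,
        # to index the lists in `counts`) and the offset of its first
        # nucleotide within the subamplicon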
if refseqstart % 3 == 1:
startcodon = (refseqstart + 2) // 3 - 1
codonshift = 0
elif refseqstart % 3 == 2:
startcodon = (refseqstart + 1) // 3
codonshift = 2
elif refseqstart % 3 == 0:
startcodon = refseqstart // 3
codonshift = 1
else:
raise ValueError("Invalid chartype")
shiftedsubamplicon = subamplicon[codonshift : ]
for i in range(len(shiftedsubamplicon) // 3):
codon = shiftedsubamplicon[3 * i : 3 * i + 3]
if 'N' not in codon:
counts[codon][startcodon + i] += 1
def codonToAACounts(counts):
"""Makes amino-acid counts `pandas.DataFrame` from codon counts.
Args:
`counts` (`pandas.DataFrame`)
Columns are the string `site` `wildtype` and all codons
in `CODONS`. Additional columns are allowed
but ignored.
Returns:
`aacounts` (`pandas.DataFrame`)
Columns are the string `site` and all amino acids
in `AAS_WITHSTOP` with counts for each
amino acid made by summing counts for encoding codons.
>>> d = {'site':[1, 2], 'othercol':[0, 0], 'ATG':[105, 1],
... 'GGG':[3, 117], 'GGA':[2, 20], 'TGA':[0, 1],
... 'wildtype':['ATG', 'GGG']}
>>> for codon in CODONS:
... if codon not in d:
... d[codon] = [0, 0]
>>> counts = pandas.DataFrame(d)
>>> aacounts = codonToAACounts(counts)
>>> 'othercol' in aacounts.columns
False
>>> all(aacounts['site'] == [1, 2])
True
>>> all(aacounts['wildtype'] == ['M', 'G'])
True
>>> all(aacounts['M'] == [105, 1])
True
>>> all(aacounts['G'] == [5, 137])
True
>>> all(aacounts['*'] == [0, 1])
True
>>> all(aacounts['V'] == [0, 0])
True
"""
d = dict([(key, []) for key in ['site', 'wildtype'] +
AAS_WITHSTOP])
for (i, row) in counts.iterrows():
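        # translate the wildtype codon and sum the counts of all codons
        # encoding each amino acid at this site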
d['site'].append(row['site'])
d['wildtype'].append(CODON_TO_AA[row['wildtype']])
for aa in AAS_WITHSTOP:
d[aa].append(0)
for c in CODONS:
d[CODON_TO_AA[c]][-1] += (row[c])
return pandas.DataFrame(d)
def annotateCodonCounts(counts):
"""Gets annotated `pandas.DataFrame` from codon counts.
Some of the programs (e.g., `dms2_bcsubamplicons`) create
``*_codoncounts.csv`` files when run with ``--chartype codon``.
These CSV files have columns indicating the `site` and `wildtype`
codon, as well as a column for each codon giving the counts for that
codon. This function reads that file (or a `pandas.DataFrame` read
from it) to return a `pandas.DataFrame` where a variety of additional
useful annotations have been added.
Args:
`counts` (str)
Name of existing codon counts CSV file, or `pandas.DataFrame`
holding counts.
Returns:
`df` (`pandas.DataFrame`)
The DataFrame with the information in `counts` plus
the following added columns for each site:
`ncounts` : number of counts at site
`mutfreq` : mutation frequency at site
`nstop` : number of stop-codon mutations
`nsyn` : number of synonymous mutations
`nnonsyn` : number of nonsynonymous mutations
`n1nt` : number of 1-nucleotide codon mutations
`n2nt` : number of 2-nucleotide codon mutations
`n3nt` : number of 3-nucleotide codon mutations
`AtoC`, `AtoG`, etc : number of each nucleotide mutation
type among codon mutations with **one** nucleotide change.
`mutfreq1nt`, `mutfreq2nt`, `mutfreq3nt` : frequency
of 1-, 2-, and 3-nucleotide codon mutations at site.
>>> d = {'site':[1, 2], 'wildtype':['ATG', 'GGG'], 'ATG':[105, 1],
... 'GGG':[3, 117], 'GGA':[2, 20], 'TGA':[0, 1]}
>>> for codon in CODONS:
... if codon not in d:
... d[codon] = [0, 0]
>>> counts = pandas.DataFrame(d)
>>> with tempfile.NamedTemporaryFile(mode='w') as f:
... counts.to_csv(f, index=False)
... f.flush()
... df = annotateCodonCounts(f.name)
>>> all([all(df[col] == counts[col]) for col in counts.columns])
True
>>> all(df['ncounts'] == [110, 139])
True
>>> all(df['mutfreq'] == [5 / 110., 22 / 139.])
True
>>> all(df['nstop'] == [0, 1])
True
>>> all(df['nsyn'] == [0, 20])
True
>>> all(df['nnonsyn'] == [5, 1])
True
>>> all(df['n1nt'] == [0, 20])
True
>>> all(df['n2nt'] == [3, 2])
True
>>> all(df['n3nt'] == [2, 0])
True
>>> all(df['GtoA'] == [0, 20])
True
>>> all(df['AtoC'] == [0, 0])
True
>>> all(df['mutfreq1nt'] == [0, 20 / 139.])
True
>>> all(df['mutfreq3nt'] == [2 / 110., 0])
True
"""
if isinstance(counts, str):
df = pandas.read_csv(counts)
elif isinstance(counts, pandas.DataFrame):
df = counts.copy()
else:
raise ValueError("invalid counts")
    assert set(CODONS) <= set(df.columns), \
        "Did not find counts for all codons"
df['ncounts'] = df[CODONS].sum(axis=1)
df['mutfreq'] = (((df['ncounts'] - df.lookup(df['wildtype'].index,
df['wildtype'].values)) / df['ncounts'].astype('float'))
.fillna(0))
ntchanges = ['{0}to{1}'.format(nt1, nt2) for nt1 in dms_tools2.NTS
for nt2 in dms_tools2.NTS if nt1 != nt2]
nstoplist = []
nsynlist = []
nnonsynlist = []
nXntlists = dict([(n + 1, []) for n in range(3)])
nntchangeslists = dict([(ntchange, []) for ntchange in ntchanges])
for (i, row) in df.iterrows():
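        # classify every non-wildtype codon at this site as stop / synonymous /
        # nonsynonymous and by its number of nucleotide changes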
nstop = nsyn = nnonsyn = 0
nXnt = dict([(n + 1, 0) for n in range(3)])
nntchanges = dict([(ntchange, 0) for ntchange in ntchanges])
wt = row['wildtype']
wtaa = CODON_TO_AA[wt]
for c in CODONS:
if c == wt:
continue
aa = CODON_TO_AA[c]
if aa == '*':
nstop += row[c]
elif aa == wtaa:
nsyn += row[c]
else:
nnonsyn += row[c]
ntdiffs = ['{0}to{1}'.format(nt1, nt2) for (nt1, nt2)
in zip(wt, c) if nt1 != nt2]
nXnt[len(ntdiffs)] += row[c]
if len(ntdiffs) == 1:
nntchanges[ntdiffs[0]] += row[c]
nstoplist.append(nstop)
nsynlist.append(nsyn)
nnonsynlist.append(nnonsyn)
for n in range(3):
nXntlists[n + 1].append(nXnt[n + 1])
for ntchange in ntchanges:
nntchangeslists[ntchange].append(nntchanges[ntchange])
df = df.assign(nstop=nstoplist, nsyn=nsynlist, nnonsyn=nnonsynlist)
df = df.assign(n1nt=nXntlists[1], n2nt=nXntlists[2], n3nt=nXntlists[3])
for ntchange in ntchanges:
df[ntchange] = nntchangeslists[ntchange]
for nnt in range(3):
df['mutfreq{0}nt'.format(nnt + 1)] = (df['n{0}nt'.format(nnt + 1)]
/ df['ncounts'].astype('float')).fillna(0)
return df
def adjustErrorCounts(errcounts, counts, charlist, maxexcess):
"""Adjust error counts to not greatly exceed counts of interest.
This function is useful when estimating preferences. Under the
model, the error-control should not have a higher rate of error
than the actual sample. However, this could happen if the experimental
data don't fully meet the assumptions. So this function scales
down the error counts in that case.
Args:
`errcounts` (pandas.DataFrame)
Holds counts for error control.
`counts` (pandas.DataFrame)
Holds counts for which we are correcting errors.
`charlist` (list)
Characters for which we have counts.
`maxexcess` (int)
Only let error-control counts exceed actual by this much.
Returns:
A copy of `errcounts` except for any non-wildtype character,
the maximum frequency of that character is adjusted to be
at most the number predicted by the frequency in `counts`
plus `maxexcess`.
>>> counts = pandas.DataFrame({'site':[1], 'wildtype':['A'],
... 'A':500, 'C':10, 'G':40, 'T':20})
>>> errcounts = pandas.DataFrame({'site':[1], 'wildtype':['A'],
... 'A':250, 'C':1, 'G':30, 'T':10})
>>> charlist = ['A', 'C', 'G', 'T']
>>> errcounts = errcounts[['site', 'wildtype'] + charlist]
>>> adj_errcounts = adjustErrorCounts(errcounts, counts, charlist, 1)
>>> set(adj_errcounts.columns) == set(errcounts.columns)
True
>>> all(adj_errcounts['site'] == errcounts['site'])
True
>>> all(adj_errcounts['wildtype'] == errcounts['wildtype'])
True
>>> (adj_errcounts[adj_errcounts['site'] == 1][charlist].values[0]
... == numpy.array([250, 1, 21, 10])).all()
True
"""
cols = counts.columns
counts = counts.sort_values('site')
errcounts = errcounts.sort_values('site')
assert all(counts['site'] == errcounts['site'])
assert all(counts['wildtype'] == errcounts['wildtype'])
counts['total'] = counts[charlist].sum(axis=1).astype('float')
errcounts['total'] = errcounts[charlist].sum(axis=1)
maxallowed = (counts[charlist].div(counts['total'], axis=0).multiply(
errcounts['total'], axis=0) + maxexcess).round().astype('int')
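    # cap each error-control count at the value implied by the sample's
    # per-character frequency (scaled to the error-control depth) plus
    # `maxexcess`; wildtype counts are restored from `errcounts` below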
adj_errcounts = errcounts[charlist].where(errcounts[charlist] < maxallowed,
maxallowed[charlist])
for c in charlist:
adj_errcounts[c] = adj_errcounts[c].where(counts['wildtype'] != c,
errcounts[c])
for col in cols:
if col not in charlist:
adj_errcounts[col] = counts[col]
return adj_errcounts[cols]
def convertCountsFormat(oldfile, newfile, charlist):
"""Convert counts file from ``dms_tools`` to ``dms_tools2`` format.
Args:
`oldfile` (str)
Name of counts file in the old ``dms_tools`` format:
http://jbloomlab.github.io/dms_tools/fileformats.html
`newfile` (str)
Name of created counts file in the ``dms_tools2`` format:
https://jbloomlab.github.io/dms_tools2/dms2_bcsubamp.html
`charlist` (list)
List of characters that we expect in the counts files.
For instance, could be `CODONS`.
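
    Example (hypothetical file names)::

        convertCountsFormat('old_counts.txt', 'new_counts.csv', CODONS)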
"""
with open(oldfile) as f:
header = f.readline()
assert header[0] == '#'
cols = header[1 : ].split()
assert cols[0] == 'POSITION' and cols[1] == 'WT'
cols = ['site', 'wildtype'] + cols[2 : ]
assert set(charlist) == set(cols[2 : ])
old = pandas.read_csv(oldfile, delim_whitespace=True,
names=cols, comment='#')
old.to_csv(newfile, index=False)
def renumberSites(renumbfile, infiles, missing='error',
outfiles=None, outprefix=None, outdir=None):
"""Renumber sites in CSV files.
Switch numbering scheme in files with a column named `site`.
You must specify **exactly one** of `outfiles`,
`outprefix`, and `outdir` as something other than `None`.
Args:
`renumbfile` (str)
Name of existing CSV file with the re-numbering scheme.
Should have columns with name `original` and `new`.
Each entry in `original` should refer to a site in
the input files, and each entry in `new` should be
the new number for this site. If an entry in `new`
is `None` or `nan` then it is dropped from the newly
numbered files regardless of `missing`.
`infiles` (list)
List of existing CSV files that we are re-numbering.
            Each file must have a column named `site`.
`missing` (str)
How to handle sites in `infiles` but not `renumbfile`.
- `error`: raise an error
- `skip`: skip renumbering, leave with original number
- `drop`: drop any sites not in `renumbfile`
`outfiles` (list)
List of output files of the same length as `infiles`.
The numbered version of `infiles` is named as the
corresponding entry in `outfiles`.
`outdir` (str)
A directory name. The renumbered files have the same
names as in `infile`, but are now placed in `outdir`.
`outprefix` (str)
The renumbered files have the same names and locations
as `infiles`, but have the pre-pended filename extension
`outprefix`.
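
    Example (hypothetical file names)::

        renumberSites('renumber.csv', ['counts.csv'], missing='skip',
                      outprefix='renumbered_')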
"""
assert os.path.isfile(renumbfile), "no renumbfile {0}".format(renumbfile)
renumb = pandas.read_csv(renumbfile)
assert {'original', 'new'} <= set(renumb.columns), \
"renumbfile lacks columns `original` and/or `new`"
for col in ['original', 'new']:
assert len(renumb[col]) == len(set(renumb[col])), \
"duplicate sites for {0} in {1}".format(col, renumbfile)
renumb[col] = renumb[col].astype('str')
assert isinstance(infiles, list), "infiles is not a list"
nin = len(infiles)
infiles = [os.path.abspath(f) for f in infiles]
assert len(set(infiles)) == nin, "duplicate files in `infiles`"
if outfiles is not None:
assert isinstance(outfiles, list), "`outfiles` not list"
assert (outdir is None) and (outprefix is None), \
"only specify one of `outfiles`, `outdir`, and `outprefix`"
nout = len(outfiles)
assert nout == nin, "`outfiles` and `infiles` different length"
elif outdir is not None:
assert isinstance(outdir, str), "`outdir` should be string"
assert (outfiles is None) and (outprefix is None), \
"only specify one of `outfiles`, `outdir`, and `outprefix`"
if not os.path.isdir(outdir):
os.mkdir(outdir)
outfiles = [os.path.join(outdir, os.path.basename(f))
for f in infiles]
elif outprefix is not None:
        assert isinstance(outprefix, str), "`outprefix` should be string"
assert (outfiles is None) and (outdir is None), \
"only specify one of `outfiles`, `outdir`, and `outprefix`"
outfiles = [os.path.join(os.path.dirname(f), outprefix +
os.path.basename(f)) for f in infiles]
else:
raise ValueError("specify `outdir`, `outprefix`, `outfiles`")
outfiles = [os.path.abspath(f) for f in outfiles]
assert len(set(outfiles)) == len(outfiles), "duplicate files in `outfiles`"
assert not set(outfiles).intersection(set(infiles)), \
"some in and outfiles the same"
for (fin, fout) in zip(infiles, outfiles):
df_in = pandas.read_csv(fin)
assert 'site' in df_in.columns, "no `site` column in {0}".format(fin)
df_in['site'] = df_in['site'].astype('str')
if missing == 'error':
if set(df_in['site']) > set(renumb['original']):
raise ValueError("`missing` is `error`, excess sites in {0}"
.format(fin))
elif missing == 'skip':
pass
elif missing == 'drop':
df_in = df_in[df_in['site'].isin(renumb['original'])]
else:
raise ValueError("invalid `missing` of {0}".format(missing))
# can't just use replace below because of this bug:
# https://github.com/pandas-dev/pandas/issues/16051
unmappedsites = df_in[~df_in['site'].isin(renumb['original'])]['site']
replacemap = dict(zip(
renumb['original'].append(unmappedsites),
renumb['new'].append(unmappedsites)))
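        # sites absent from `renumbfile` map to themselves, so with
        # `missing='skip'` they keep their original numbers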
df_in['site'] = df_in['site'].map(replacemap)
df_in = (df_in[df_in['site'].notnull()]
.query('site != "NaN"')
.query('site != "nan"')
.query('site != "None"')
)
df_in.to_csv(fout, index=False)
def codonEvolAccessibility(seqs):
"""Accessibility of amino acids by nucleotide mutations.
Args:
`seqs` (str or list)
A single coding sequence or a list of such sequences.
Returns:
A pandas DataFrame listing all sites in the sequence(s)
numbered 1, 2, ..., with columns giving the accessibility
of each amino acid by single nucleotide mutations.
The accessibility of codon :math:`c` to amino-acid :math:`a`
by single-nucleotide mutations is defined as the minimum
number of nucleotide mutations needed to generate that
amino-acid.
For a collection of sequences, we calculate the
accessibility as the weighted average of the accessibilities
of all codons observed at that site in the collection of
sequences.
As an example, compute accessibility for one sequence:
>>> s = "ATGGGA"
>>> acc = codonEvolAccessibility(s)
The returned pandas DataFrame `acc` is has a column named
`site` plus columns for all amino acids:
>>> all(acc.columns == ['site'] + AAS_WITHSTOP)
True
We look at entries for a few amino acids. At the first
site, the wildtype entry in the sequence `s` is the codon
for *M* (``ATG``). So at this site, the distance to *M*
is 0. The distance to *I* (which has codon ``ATA`` as a
codon) is 1, and the distance to *W* (which has only ``TGG``
as a codon) is 2.
>>> acc[['site', 'G', 'I', 'M', 'W']]
site G I M W
0 1 2.0 1.0 0.0 2.0
1 2 0.0 2.0 3.0 2.0
If we pass the function a list of multiple sequences,
then the accessibilities are averaged over the sequences:
>>> acc2 = codonEvolAccessibility(['ATGGGA', 'ATAGGA'])
>>> acc2[['site', 'G', 'I', 'M', 'W']]
site G I M W
0 1 2.0 0.5 0.5 2.5
1 2 0.0 2.0 3.0 2.0
"""
# get number of nucleotide diffs between all pairs of codons
nt_diffs = dict([
((c1, c2), sum(1 for x1, x2 in zip(c1, c2) if x1 != x2))
for c1, c2 in itertools.product(CODONS, repeat=2)])
# get number of nucleotide diffs to nearest codon for amino acid
aa_nt_diffs = {}
for c in CODONS:
for aa, othercs in AA_TO_CODONS.items():
aa_nt_diffs[(c, aa)] = min([nt_diffs[(c, c2)]
for c2 in othercs])
# make sure seqs are of same valid length
if isinstance(seqs, str):
seqs = [seqs]
assert len(seqs[0]) % 3 == 0, "seqs not of length divisible by 3"
assert all([len(seqs[0]) == len(s) for s in seqs[1 : ]]), \
"seqs not all of same length"
# get nucleotide distances, summing for all sequences
dists = collections.defaultdict(lambda: collections.defaultdict(float))
for s in seqs:
for r in range(len(s) // 3):
c = s[3 * r : 3 * r + 3]
assert c in CODONS, "invalid codon {0}".format(c)
for aa in AAS_WITHSTOP:
dists[r + 1][aa] += aa_nt_diffs[(c, aa)]
return (pandas.DataFrame.from_dict(dists, orient='index')
.rename_axis('site')
[AAS_WITHSTOP]
/ len(seqs)).reset_index()
def sigFigStr(x, nsig):
"""Get str of `x` with `nsig` significant figures.
>>> sigFigStr(11190, 2)
'11000'
>>> sigFigStr(117, 2)
'120'
>>> sigFigStr(6, 2)
'6.0'
>>> sigFigStr(0.213, 2)
'0.21'
>>> sigFigStr(0.007517, 3)
'0.00752'
"""
if x <= 0:
raise ValueError('currently only handles numbers > 0')
x = float(f"{{:.{nsig}g}}".format(x))
if x >= 10**(nsig - 1):
return '{:d}'.format(round(x))
else:
predecimal = math.floor(math.log10(x)) + 1
postdecimal = nsig - predecimal
assert postdecimal > 0, str(x)
return f"{{:.{postdecimal}f}}".format(x)
def getSubstitutions(wildtype, mutant, amino_acid=False):
"""Get space delimited string of substitutions
Args:
`wildtype` (str):
The wildtype sequence
`mutant` (str):
The mutant sequence
`amino_acid` (bool)
            Specify whether the sequences are amino-acid (rather than
            nucleotide) sequences. Default is False.
Returns:
A space delimited string of substitutions present in the
mutant sequence
>>> getSubstitutions('AGT', 'TGT')
'A1T'
>>> getSubstitutions('AAGTAACGA', 'ATCTAACGA')
'A2T G3C'
>>> getSubstitutions('TYARV', 'GYAGV', amino_acid=True)
'T1G R4G'
"""
if len(wildtype) != len(mutant):
raise ValueError('wildtype and mutant must be same length')
subs = []
for site in range(len(wildtype)):
wt = wildtype[site]
mut = mutant[site]
        if amino_acid:
            if wt not in AAS_WITHSTOP:
                raise ValueError(f"Invalid wt residue {wt} at site {site + 1}")
            if mut not in AAS_WITHSTOP:
                raise ValueError(f"Invalid mutant residue {mut} at site {site + 1}")
        else:
            if wt not in NTS:
                raise ValueError(f"Invalid wt nucleotide {wt} at site {site + 1}")
            if mut not in NTS:
                raise ValueError(f"Invalid mutant nucleotide {mut} at site {site + 1}")
        if wt != mut:
            pos = str(site + 1)
            subs.append(f"{wt}{pos}{mut}")
subs = ' '.join(subs)
return subs
def codon_to_nt_counts(codoncounts):
"""Convert codon counts file to nucleotide counts.
Args:
`codoncounts` (str or pandas.DataFrame)
Codon counts in format produced by ``dms2_bcsubamp``,
either as CSV file or data frame holding CSV.
Returns:
pandas.DataFrame with nucleotide counts.
Example:
>>> with tempfile.NamedTemporaryFile('w') as f:
... _ = f.write(textwrap.dedent('''
... site,wildtype,AAA,AAC,AAG,AAT,ACA,ACC,ACG,ACT,AGA,AGC,AGG,AGT,ATA,ATC,ATG,ATT,CAA,CAC,CAG,CAT,CCA,CCC,CCG,CCT,CGA,CGC,CGG,CGT,CTA,CTC,CTG,CTT,GAA,GAC,GAG,GAT,GCA,GCC,GCG,GCT,GGA,GGC,GGG,GGT,GTA,GTC,GTG,GTT,TAA,TAC,TAG,TAT,TCA,TCC,TCG,TCT,TGA,TGC,TGG,TGT,TTA,TTC,TTG,TTT
... 1,ATG,0,0,0,0,0,0,2,0,0,0,0,0,8,0,333985,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
... 2,AAG,16,20,333132,41,13,12,27,14,8,6,67,8,9,13,29,9,10,11,12,8,10,15,15,11,6,9,3,7,8,10,17,4,3,7,49,7,9,14,9,4,10,7,7,7,9,11,11,5,14,14,11,6,13,16,15,14,9,9,15,8,9,11,8,15
... 3,GCA,2,3,8,3,34,11,7,6,7,6,9,8,4,3,5,0,6,14,10,12,6,8,7,10,5,11,7,6,6,1,3,12,19,6,11,9,333250,10,6,9,15,3,5,5,37,9,9,7,8,4,8,3,23,5,7,8,6,11,7,10,7,9,3,6
... '''.strip()))
... f.flush()
... nt_counts = codon_to_nt_counts(f.name)
>>> nt_counts
site wildtype A C G T
0 1 A 334009 0 6 0
1 2 T 0 2 0 334013
2 3 G 8 0 333993 14
3 4 A 333424 156 169 187
4 5 A 333361 211 186 178
5 6 G 156 185 333427 168
6 7 G 116 124 333410 125
7 8 C 126 333407 121 121
8 9 A 333435 114 112 114
"""
if not isinstance(codoncounts, pandas.DataFrame):
codoncounts = pandas.read_csv(codoncounts)
if codoncounts['site'].dtype != int:
raise ValueError('`site` column in `codoncounts` must be integer')
nt_counts = []
for i_nt in [0, 1, 2]:
nt_counts.append(
codoncounts
.melt(id_vars=['site', 'wildtype'],
var_name='codon',
value_name='count',
)
.assign(
site=lambda x: 3 * (x['site'] - 1) + i_nt + 1,
wildtype=lambda x: x['wildtype'].str[i_nt],
nucleotide=lambda x: x['codon'].str[i_nt],
)
.groupby(['site', 'wildtype', 'nucleotide'])
.aggregate({'count': 'sum'})
.reset_index()
.pivot_table(values='count',
columns='nucleotide',
index=['site', 'wildtype'])
.reset_index()
)
nt_counts = (pandas.concat(nt_counts)
.sort_values('site')
.reset_index(drop=True)
)
del nt_counts.columns.name
return nt_counts
def barcodeInfoToCodonVariantTable(samples, geneseq, path=None):
"""Convert barcode info files into a CodonVariantTable
Convert barcode info files output from `dms2_bcsubamp` into a
`CodonVariantTable`. Barcode info files contain reads and barcodes from
barcoded subamplicon sequencing, described
`here <https://jbloomlab.github.io/dms_tools2/bcsubamp.html>`_.
This function takes consensus reads retained by `dms2_bcsubamp`,
gives each unique sequence a numerical barcode (since the barcodes from
`dms2_bcsubamp` could come from the same variant), and counts the number
of retained consensus reads corresponding to each sequence. Then, a
`CodonVariantTable` is made using the sequences and their numerical
barcodes, and counts are added based on the number of retained consensus
reads of those sequences. Therefore, the `CodonVariantTable` will only
contain one 'variant' for each unique sequence with the total count for all
the unbarcoded variants in the experiment which had the same sequence.
Args:
`samples` (dict):
Dictionary with libraries as keys and lists of info file prefixes
(file names without the '_bcinfo.txt.gz') for files corresponding
to those libraries as values.
Example: {'library-1':['condition-1-library-1'],
'library-2':['condition-1-library-2']}
`geneseq` (str):
The wildtype gene sequence
        `path` (str):
Directory in which barcode info files are located
Returns:
A `dms_variants.codonvarianttable.CodonVariantTable` with 'counts'
generated from the barcode info files
"""
# Set up re matchers for looking at lines
matcher = re.compile(r'(?P<linetype>^.*\:) '
r'(?P<contents>.*$)')
alt_matcher = re.compile(r'(?P<linetype>^R\d READS:$)')
read_matcher = re.compile(r'(?P<read>^[ATGCN\s]*$)')
# Create a dictionary to contain dictionaries of each library's barcodes
libraries = {}
# Initialize lists for making the codonvarianttable
barcodes = []
subs = []
variant_call_support = []
library_list = []
# For each library, go through each sample file and collect data
for library in samples.keys():
# Initialize dictionary to contain this library's reads and barcodes
barcode_dictionary = {}
# Start a barcode count for this library
cur_barcode = 1
# For each barcode info file corresponding to a sample in this library
for sample in samples[library]:
            # Set initial conditions (retain is assigned once a RETAINED: line is seen)
            retain = None
# Find the file
f = f"{sample}_bcinfo.txt.gz"
if path:
file_path = os.path.join(os.path.abspath(path), f)
else:
file_path = f
# Open the file and loop through it to find retained consensus
# reads and give them each a new barcode
with gzip.open(file_path, 'r') as f:
# Make sure the first line looks like it is supposed to
firstline = f.readline()
firstline = firstline.decode()
first_match = matcher.match(firstline)
if first_match.group('linetype') != 'BARCODE:':
raise ValueError(f"Unexpected first line {firstline}: may be "
"unexpected file type")
else:
previous_line = first_match
                # Go through the lines, making sure they are in the expected order
for line in f:
line = line.decode()
line_match = matcher.match(line)
if not line_match:
line_match = alt_matcher.match(line)
if not line_match:
read_match = read_matcher.match(line)
if not read_match:
raise ValueError(f"Unable to recognize line {line}")
else:
line_is_read = True
previous_linetype = previous_line.group('linetype')
if previous_linetype != 'R1 READS:' and \
previous_linetype != 'R2 READS:':
raise ValueError(f"Unexpected line {line}")
else:
line_is_read = False
if previous_line.group('linetype') == 'BARCODE:':
if line_match.group('linetype') != 'RETAINED:':
raise ValueError(f"Unexpected line {line}")
# Decide whether to retain the next consensus or not
else:
if line_match.group('contents') == 'False':
retain = False
elif line_match.group('contents') == 'True':
retain = True
else:
raise ValueError(f"Unexpected line {line}")
elif previous_line.group('linetype') == 'RETAINED:':
if line_match.group('linetype') != 'DESCRIPTION:':
raise ValueError(f"Unexpected line {line}")
elif previous_line.group('linetype') == 'DESCRIPTION:':
if line_match.group('linetype') != 'CONSENSUS:':
raise ValueError(f"Unexpected line {line}")
# Make sure we know whether to retain or not
elif not isinstance(retain, bool):
raise ValueError(
f"Unclear whether to retain {line_match.group('contents')}"
)
elif retain:
read = line_match.group('contents')
# Add the read to the dictionary if not in it
# Also give it a barcode
if 'N' not in read:
if read not in barcode_dictionary:
# Create the sequence in the dictionary
barcode_dictionary[read] = {}
# Give it an initial count of 1 for this sample
barcode_dictionary[read][sample] = 1
# Give it the next barcode
barcode_dictionary[read]['barcode'] = cur_barcode
# Save values for making CodonVariantTable
barcodes.append(cur_barcode)
subs.append(getSubstitutions(geneseq, read))
variant_call_support.append(1)
library_list.append(library)
# Advance current barcode
cur_barcode += 1
else:
# Add a counter for the sample if sequence
# not seen for this sample yet
if sample not in barcode_dictionary[read]:
barcode_dictionary[read][sample] = 1
else:
# Add another count to this read for
# this sample
barcode_dictionary[read][sample] += 1
# Set retain to None
retain = None
elif previous_line.group('linetype') == 'CONSENSUS:':
if line_match.group('linetype') != 'R1 READS:':
raise ValueError(f"Unexpected line {line}")
elif previous_line.group('linetype') == 'R1 READS:':
if not line_is_read:
if line_match.group('linetype') != 'R2 READS:':
raise ValueError(f"Unexpected line {line}")
elif previous_line.group('linetype') == 'R2 READS:':
if not line_is_read:
if line_match.group('linetype') != 'BARCODE:':
raise ValueError(f"Unexpected line {line}")
# Save this line as the previous line if it is not a read
if not line_is_read:
previous_line = line_match
# After going through each file for a library, save its dictionary with
# reads and barcodes
libraries[library] = barcode_dictionary
# Make the dataframe for creating the codonvarianttable
df = {'barcode':barcodes,
'substitutions':subs,
'library':library_list,
'variant_call_support':variant_call_support,
}
df = pandas.DataFrame(df)
# Make the codonvarianttable
with tempfile.NamedTemporaryFile(mode='w') as f:
df.to_csv(f, index=False)
f.flush()
variants = dms_variants.codonvarianttable.CodonVariantTable(
barcode_variant_file=f.name,
geneseq=geneseq)
# Make the counts dataframe:
# Initialize list of dataframes
dfs = []
# Loop through each library and produce count dataframes for each sample
for library in libraries:
barcode_dictionary = libraries[library]
for sample in samples[library]:
barcodes_list = []
counts_list = []
sample_list = []
library_list = []
# Get counts for this sample
for sequence in barcode_dictionary.keys():
if sample not in barcode_dictionary[sequence].keys():
counts_list.append(0)
else:
counts_list.append(barcode_dictionary[sequence][sample])
barcodes_list.append(barcode_dictionary[sequence]['barcode'])
sample_list.append(sample)
library_list.append(library)
# Make a dataframe for this sample
data = {'barcode':barcodes_list,
'count':counts_list,
'sample':sample_list,
'library':library_list,
}
data = pandas.DataFrame(data)
# Append it to the list of dataframes
dfs.append(data)
# Concatenate the list of dataframes into a counts dataframe
barcode_counts = pandas.concat(dfs)
# Add the counts for each sample to the codonvarianttable
for library in libraries:
for sample in samples[library]:
icounts = barcode_counts.query('library == @library & sample == @sample')
icounts = icounts[['barcode', 'count']]
variants.addSampleCounts(library, sample, icounts)
    return variants
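# A minimal usage sketch for `barcodeInfoToCodonVariantTable`; the library
# names, sample prefixes, gene sequence and path below are hypothetical, and
# the '*_bcinfo.txt.gz' files must have been produced by ``dms2_bcsubamp``:
#
#     samples = {'library-1': ['condition-1-library-1'],
#                'library-2': ['condition-1-library-2']}
#     variants = barcodeInfoToCodonVariantTable(samples,
#                                               geneseq='ATGGGA...',
#                                               path='results/bcsubamp')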
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
danche354/Sequence-Labeling | ner_BIOES/senna-hash-2-chunk-gazetteer-128-64-rmsprop5.py | 1 | 7858 | from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
from keras.optimizers import RMSprop
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
np.random.seed(0)
# train hyperparameters
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.gazetteer_length
emb_vocab = conf.senna_vocab
emb_length = conf.senna_length
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_BIOES_length
batch_size = conf.batch_size
nb_epoch = 70 #conf.nb_epoch
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
# the data, shuffled and split between train and test sets
train_data = load_data.load_ner(dataset='eng.train', form='BIOES')
dev_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])
hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
hash_embedding = hash_embedding.values
hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])
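# Build the network: concatenate senna word embeddings, hash (auto-encoder)
# embeddings, chunk one-hot features and gazetteer features per token, mask the
# zero padding, and pass the result through stacked bidirectional LSTMs
# (128 then 64 units) into a per-timestep softmax over the BIOES tag set.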
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input)
hash_index_input = Input(shape=(step_length,))
encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input)
# pos_input = Input(shape=(step_length, pos_length))
chunk_input = Input(shape=(step_length, chunk_length))
gazetteer_input = Input(shape=(step_length, gazetteer_length))
senna_hash_pos_chunk_gazetteer_merge = merge([embedding, encoder_embedding, chunk_input, gazetteer_input], mode='concat')
input_mask = Masking(mask_value=0)(senna_hash_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
model = Model(input=[embed_index_input,hash_index_input,chunk_input, gazetteer_input], output=output)
rmsprop = RMSprop(lr=0.0005)
model.compile(loss='categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
print(model.summary())
number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))
print('start train %s ...\n'%model_name)
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []
log = open('%s/model_log.txt'%folder_path, 'w')
start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
for epoch in range(nb_epoch):
start = datetime.now()
print('-'*60)
print('epoch %d start at %s'%(epoch, str(start)))
log.write('-'*60+'\n')
log.write('epoch %d start at %s\n'%(epoch, str(start)))
train_loss = 0
dev_loss = 0
np.random.shuffle(train_data)
for i in range(number_of_train_batches):
train_batch = train_data[i*batch_size: (i+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=train_batch, form='BIOES', gram='bi')
# pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer(batch=train_batch)
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
train_metrics = model.train_on_batch([embed_index, hash_index, chunk, gazetteer], y)
train_loss += train_metrics[0]
all_train_loss.append(train_loss)
correct_predict = 0
all_predict = 0
for j in range(number_of_dev_batches):
dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
embed_index, hash_index, pos, chunk, label, length, sentence = prepare.prepare_ner(batch=dev_batch, form='BIOES', gram='bi')
# pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
gazetteer, length_2 = prepare.prepare_gazetteer(batch=dev_batch)
gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
y = np.array([np_utils.to_categorical(each, output_length) for each in label])
# for loss
dev_metrics = model.test_on_batch([embed_index, hash_index, chunk, gazetteer], y)
dev_loss += dev_metrics[0]
# for accuracy
prob = model.predict_on_batch([embed_index, hash_index, chunk, gazetteer])
for i, l in enumerate(length):
predict_label = np_utils.categorical_probas_to_classes(prob[i])
correct_predict += np.sum(predict_label[:l]==label[i][:l])
all_predict += np.sum(length)
    epoch_accuracy = float(correct_predict)/all_predict
    all_dev_accuracy.append(epoch_accuracy)
    all_dev_loss.append(dev_loss)
    if epoch_accuracy >= best_accuracy:
        best_accuracy = epoch_accuracy
best_epoch = epoch
end = datetime.now()
model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
print('epoch %d end at %s'%(epoch, str(end)))
print('epoch %d train loss: %f'%(epoch, train_loss))
print('epoch %d dev loss: %f'%(epoch, dev_loss))
    print('epoch %d dev accuracy: %f'%(epoch, epoch_accuracy))
print('best epoch now: %d\n'%best_epoch)
log.write('epoch %d end at %s\n'%(epoch, str(end)))
log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
    log.write('epoch %d dev accuracy: %f\n'%(epoch, epoch_accuracy))
log.write('best epoch now: %d\n\n'%best_epoch)
end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
| mit |
mugizico/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/learning_curve.py | 2 | 13315 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import check_arrays
from .utils.fixes import astype
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/plot_learning_curve.py <example_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = check_arrays(X, y, sparse_format='csr', allow_lists=True)
# Make a list since we will be iterating multiple times over the folds
cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
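# A minimal usage sketch (the estimator and the X, y arrays are placeholders):
#
#     from sklearn.naive_bayes import GaussianNB
#     train_sizes_abs, train_scores, test_scores = learning_curve(
#         GaussianNB(), X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
#
# Both score arrays have shape (n_ticks, n_cv_folds); averaging over axis=1
# gives one mean training and one mean validation score per training-set size.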
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/plot_validation_curve.py <example_plot_validation_curve.py>`
"""
X, y = check_arrays(X, y, sparse_format='csr', allow_lists=True)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
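# A minimal usage sketch (the estimator, X, y and the parameter range are
# placeholders):
#
#     from sklearn.svm import SVC
#     param_range = np.logspace(-6, -1, 5)
#     train_scores, test_scores = validation_curve(
#         SVC(), X, y, param_name="gamma", param_range=param_range, cv=5)
#
# Each row of the returned arrays corresponds to one value in ``param_range``.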
| apache-2.0 |
cgre-aachen/gempy | gempy/plot/visualization_2d.py | 1 | 35966 | """
This file is part of gempy.
gempy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gempy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with gempy. If not, see <http://www.gnu.org/licenses/>.
Module with classes and methods to visualized structural geology data and potential fields of the regional modelling based on
the potential field method.
Created on 23/09/2019
@author: Miguel de la Varga, Elisa Heim
"""
import warnings
import os
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.ticker import FixedFormatter, FixedLocator
import matplotlib.gridspec as gridspect
import matplotlib as mpl
import scipy.spatial.distance as dd
import seaborn as sns
sns.set_context('talk')
plt.style.use(['seaborn-white', 'seaborn-talk'])
warnings.filterwarnings("ignore", message="No contour levels were found")
class Plot2D:
"""
Class with functionality to plot 2D gempy sections
Args:
model: gempy.Model object
cmap: Color map to pass to matplotlib
"""
def __init__(self, model, cmap=None, norm=None, **kwargs):
self.model = model
self._color_lot = dict(zip(self.model._surfaces.df['surface'],
self.model._surfaces.df['color']))
self.axes = list()
if cmap is None:
self.cmap = mcolors.ListedColormap(list(self.model._surfaces.df['color']))
self._custom_colormap = False
else:
self.cmap = cmap
self._custom_colormap = True
if norm is None:
self.norm = mcolors.Normalize(vmin=0.5, vmax=len(self.cmap.colors) + 0.5)
else:
self.norm = norm
def update_colot_lot(self, color_dir=None):
if color_dir is None:
color_dir = dict(zip(self.model._surfaces.df['surface'], self.model._surfaces.df['color']))
self._color_lot = color_dir
if self._custom_colormap is False:
self.cmap = mcolors.ListedColormap(list(self.model._surfaces.df['color']))
self.norm = mcolors.Normalize(vmin=0.5, vmax=len(self.cmap.colors) + 0.5)
@staticmethod
def remove(ax):
while len(ax.collections) != 0:
list(map(lambda x: x.remove(), ax.collections))
def _make_section_xylabels(self, section_name, n=5):
"""
@elisa heim
        Setting the axis labels to any combination of vertical cross-sections
        Args:
            section_name: name of a defined gempy cross-section. See gempy.Model().grid.sections
n:
Returns:
"""
if n > 5:
n = 3 # todo I don't know why but sometimes it wants to make a lot of xticks
elif n < 0:
n = 3
j = np.where(self.model._grid.sections.names == section_name)[0][0]
startend = list(self.model._grid.sections.section_dict.values())[j]
p1, p2 = startend[0], startend[1]
xy = self.model._grid.sections.calculate_line_coordinates_2points(p1, p2, n)
if len(np.unique(xy[:, 0])) == 1:
labels = xy[:, 1].astype(int)
axname = 'Y'
elif len(np.unique(xy[:, 1])) == 1:
labels = xy[:, 0].astype(int)
axname = 'X'
else:
labels = [str(xy[:, 0].astype(int)[i]) + ',\n' + str(xy[:, 1].astype(int)[i]) for i in
range(xy[:, 0].shape[0])]
axname = 'X,Y'
return labels, axname
def _slice(self, direction, cell_number=25):
"""
Slice the 3D array (blocks or scalar field) in the specific direction selected in the plot functions
"""
_a, _b, _c = (slice(0, self.model._grid.regular_grid.resolution[0]),
slice(0, self.model._grid.regular_grid.resolution[1]),
slice(0, self.model._grid.regular_grid.resolution[2]))
if direction == "x":
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2) if cell_number == 'mid' else cell_number
_a, x, y, Gx, Gy = cell_number, "Y", "Z", "G_y", "G_z"
extent_val = self.model._grid.regular_grid.extent[[2, 3, 4, 5]]
elif direction == "y":
cell_number = int(self.model._grid.regular_grid.resolution[1] / 2) if cell_number == 'mid' else cell_number
_b, x, y, Gx, Gy = cell_number, "X", "Z", "G_x", "G_z"
extent_val = self.model._grid.regular_grid.extent[[0, 1, 4, 5]]
elif direction == "z":
cell_number = int(self.model._grid.regular_grid.resolution[2] / 2) if cell_number == 'mid' else cell_number
_c, x, y, Gx, Gy = cell_number, "X", "Y", "G_x", "G_y"
extent_val = self.model._grid.regular_grid.extent[[0, 1, 2, 3]]
else:
raise AttributeError(str(direction) + "must be a cartesian direction, i.e. xyz")
return _a, _b, _c, extent_val, x, y, Gx, Gy
def create_figure(self, figsize=None, textsize=None, **kwargs):
"""
Create the figure.
Args:
figsize:
textsize:
Returns:
figure, list axes, subgrid values
"""
cols = kwargs.get('cols', 1)
rows = kwargs.get('rows', 1)
figsize, self.ax_labelsize, _, self.xt_labelsize, self.linewidth, _ = _scale_fig_size(
figsize, textsize, rows, cols)
        self.fig = plt.figure(figsize=figsize, constrained_layout=False)
self.fig.is_legend = False
# TODO make grid variable
# self.gs_0 = gridspect.GridSpec(2, 2, figure=self.fig, hspace=.9)
return self.fig, self.axes # , self.gs_0
def add_section(self, section_name=None, cell_number=None, direction='y', ax=None, ax_pos=111,
ve=1., **kwargs):
extent_val = kwargs.get('extent', None)
self.update_colot_lot()
if ax is None:
ax = self.fig.add_subplot(ax_pos)
if section_name is not None:
if section_name == 'topography':
ax.set_title('Geological map')
ax.set_xlabel('X')
ax.set_ylabel('Y')
extent_val = self.model._grid.topography.extent
else:
dist = self.model._grid.sections.df.loc[section_name, 'dist']
extent_val = [0, dist,
self.model._grid.regular_grid.extent[4], self.model._grid.regular_grid.extent[5]]
labels, axname = self._make_section_xylabels(section_name, len(ax.get_xticklabels()) - 2)
pos_list = np.linspace(0, dist, len(labels))
ax.xaxis.set_major_locator(FixedLocator(nbins=len(labels), locs=pos_list))
ax.xaxis.set_major_formatter(FixedFormatter((labels)))
ax.set(title=section_name, xlabel=axname, ylabel='Z')
elif cell_number is not None:
_a, _b, _c, extent_val, x, y = self._slice(direction, cell_number)[:-2]
ax.set_xlabel(x)
ax.set_ylabel(y)
ax.set(title='Cell Number: ' + str(cell_number) + ' Direction: ' + str(direction))
if extent_val is not None:
if extent_val[3] < extent_val[2]: # correct vertical orientation of plot
ax.invert_yaxis()
self._aspect = (extent_val[3] - extent_val[2]) / (extent_val[1] - extent_val[0]) / ve
ax.set_xlim(extent_val[0], extent_val[1])
ax.set_ylim(extent_val[2], extent_val[3])
ax.set_aspect('equal')
# Adding some properties to the axes to make easier to plot
ax.section_name = section_name
ax.cell_number = cell_number
ax.direction = direction
ax.tick_params(axis='x', labelrotation=30)
self.axes = np.append(self.axes, ax)
self.fig.tight_layout()
return ax
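    # A minimal usage sketch of this class (``geo_model`` stands in for a
    # solved gempy.Model with computed solutions):
    #
    #     p = Plot2D(geo_model)
    #     p.create_figure(figsize=(12, 6))
    #     ax = p.add_section(cell_number='mid', direction='y')
    #     p.plot_lith(ax)
    #     p.plot_data(ax)
    #     p.plot_contacts(ax)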
@staticmethod
def _check_default_section(ax, section_name, cell_number, direction):
if section_name is None:
try:
section_name = ax.section_name
except AttributeError:
pass
if cell_number is None:
try:
cell_number = ax.cell_number
direction = ax.direction
except AttributeError:
pass
return section_name, cell_number, direction
def plot_regular_grid(self, ax, section_name=None, cell_number=None, direction='y',
block: np.ndarray = None, resolution=None, **kwargs):
"""Generic function to plot all regular data
Args:
block:
section_name:
cell_number:
direction:
ax:
**kwargs: imshow kwargs
Returns:
"""
self.update_colot_lot()
extent_val = [*ax.get_xlim(), *ax.get_ylim()]
if 'cmap' in kwargs:
cmap = kwargs['cmap']
else:
cmap = self.cmap
if 'norm' in kwargs:
norm = kwargs['norm']
else:
norm = self.norm
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None:
if section_name == 'topography':
try:
image = self.model.solutions.geological_map[0].reshape(
self.model._grid.topography.values_2d[:, :, 2].shape)
except AttributeError:
raise AttributeError('Geological map not computed. Activate the topography grid.')
else:
assert type(section_name) == str or type(
section_name) == np.str_, 'section name must be a string of the name of the section'
assert self.model.solutions.sections is not None, 'no sections for plotting defined'
l0, l1 = self.model._grid.sections.get_section_args(section_name)
shape = self.model._grid.sections.df.loc[section_name, 'resolution']
image = self.model.solutions.sections[0][0][l0:l1].reshape(shape[0], shape[1]).T
elif cell_number is not None or block is not None:
_a, _b, _c, _, x, y = self._slice(direction, cell_number)[:-2]
if resolution is None:
resolution = self.model._grid.regular_grid.resolution
plot_block = block.reshape(self.model._grid.regular_grid.resolution)
image = plot_block[_a, _b, _c].T
else:
raise AttributeError
ax.imshow(image, origin='lower', zorder=-100,
cmap=cmap, norm=norm, extent=extent_val)
return ax
def plot_lith(self, ax, section_name=None, cell_number=None, direction='y', **kwargs):
block = self.model.solutions.lith_block
self.plot_regular_grid(ax, section_name, cell_number, direction, block=block)
def plot_values(self, ax, series_n=0, section_name=None, cell_number=None,
direction='y', **kwargs):
block = self.model.solutions.values_matrix[series_n]
self.plot_regular_grid(ax, section_name, cell_number, direction, block=block,
**kwargs)
def plot_block(self, ax, series_n=0, section_name=None, cell_number=None, direction='y',
**kwargs):
block = self.model.solutions.block_matrix[series_n]
self.plot_regular_grid(ax, section_name, cell_number, direction, block=block)
def plot_scalar_field(self, ax, section_name=None, cell_number=None, series_n=0, direction='y',
block=None, **kwargs):
"""
Plot the scalar field of a section.
Args:
ax:
section_name:
cell_number:
series_n:
direction:
block:
**kwargs:
Returns:
"""
extent_val = [*ax.get_xlim(), *ax.get_ylim()]
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None:
if section_name == 'topography':
try:
image = self.model.solutions.geological_map[1][series_n].reshape(
self.model._grid.topography.values_3D[:, :, 2].shape)
except AttributeError:
raise AttributeError('Geological map not computed. Activate the topography grid.')
else:
l0, l1 = self.model._grid.sections.get_section_args(section_name)
shape = self.model._grid.sections.df.loc[section_name, 'resolution']
image = self.model.solutions.sections[1][series_n][l0:l1].reshape(shape).T
elif cell_number is not None or block is not None:
_a, _b, _c, _, x, y = self._slice(direction, cell_number)[:-2]
if block is None:
_block = self.model.solutions.scalar_field_matrix[series_n]
else:
_block = block
plot_block = _block.reshape(self.model._grid.regular_grid.resolution)
image = plot_block[_a, _b, _c].T
else:
raise AttributeError
ax.contour(image, cmap='autumn', extent=extent_val, zorder=8, **kwargs)
if 'N' in kwargs:
kwargs.pop('N')
ax.contourf(image, cmap='autumn', extent=extent_val, zorder=7, alpha=.8,
**kwargs)
def plot_data(self, ax, section_name=None, cell_number=None, direction='y',
legend=True,
projection_distance=None, **kwargs):
"""
Plot data--i.e. surface_points and orientations--of a section.
Args:
ax:
section_name:
cell_number:
direction:
legend: bool or 'force'
projection_distance:
**kwargs:
Returns:
"""
if projection_distance is None:
projection_distance = 0.2 * self.model._rescaling.df['rescaling factor'].values[0]
self.update_colot_lot()
points = self.model._surface_points.df.copy()
orientations = self.model._orientations.df.copy()
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None:
if section_name == 'topography':
topo_comp = kwargs.get('topo_comp', 5000)
decimation_aux = int(self.model._grid.topography.values.shape[0] / topo_comp)
tpp = self.model._grid.topography.values[::decimation_aux + 1, :]
cartesian_point_dist = (dd.cdist(tpp, self.model._surface_points.df[['X', 'Y', 'Z']])
< projection_distance).sum(axis=0).astype(bool)
cartesian_ori_dist = (dd.cdist(tpp, self.model._orientations.df[['X', 'Y', 'Z']])
< projection_distance).sum(axis=0).astype(bool)
x, y, Gx, Gy = 'X', 'Y', 'G_x', 'G_y'
else:
# Project points:
shift = np.asarray(self.model._grid.sections.df.loc[section_name, 'start'])
end_point = np.atleast_2d(np.asarray(self.model._grid.sections.df.loc[section_name, 'stop']) - shift)
A_rotate = np.dot(end_point.T, end_point) / self.model._grid.sections.df.loc[section_name, 'dist'] ** 2
cartesian_point_dist = np.sqrt(((np.dot(
A_rotate, (points[['X', 'Y']]).T).T - points[['X', 'Y']]) ** 2).sum(axis=1))
cartesian_ori_dist = np.sqrt(((np.dot(
A_rotate, (orientations[['X', 'Y']]).T).T - orientations[['X', 'Y']]) ** 2).sum(axis=1))
# This are the coordinates of the data projected on the section
cartesian_point = np.dot(A_rotate, (points[['X', 'Y']] - shift).T).T
cartesian_ori = np.dot(A_rotate, (orientations[['X', 'Y']] - shift).T).T
# Since we plot only the section we want the norm of those coordinates
points[['X']] = np.linalg.norm(cartesian_point, axis=1)
orientations[['X']] = np.linalg.norm(cartesian_ori, axis=1)
x, y, Gx, Gy = 'X', 'Z', 'G_x', 'G_z'
else:
if cell_number is None:
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2)
elif cell_number == 'mid':
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2)
if direction == 'x' or direction == 'X':
arg_ = 0
dx = self.model._grid.regular_grid.dx
dir = 'X'
elif direction == 'y' or direction == 'Y':
arg_ = 2
dx = self.model._grid.regular_grid.dy
dir = 'Y'
elif direction == 'z' or direction == 'Z':
arg_ = 4
dx = self.model._grid.regular_grid.dz
dir = 'Z'
else:
raise AttributeError('Direction must be x, y, z')
_loc = self.model._grid.regular_grid.extent[arg_] + dx * cell_number
cartesian_point_dist = points[dir] - _loc
cartesian_ori_dist = orientations[dir] - _loc
x, y, Gx, Gy = self._slice(direction)[4:]
select_projected_p = cartesian_point_dist < projection_distance
select_projected_o = cartesian_ori_dist < projection_distance
# Hack to keep the right X label:
temp_label = copy.copy(ax.xaxis.label)
points_df = points[select_projected_p]
points_df['colors'] = points_df['surface'].map(self._color_lot)
points_df.plot.scatter(x=x, y=y, ax=ax, c='colors', s=70, zorder=102,
edgecolors='white',
colorbar=False)
# points_df.plot.scatter(x=x, y=y, ax=ax, c='white', s=80, zorder=101,
# colorbar=False)
if self.fig.is_legend is False and legend is True or legend == 'force':
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o',
linestyle='') for color in
self._color_lot.values()]
ax.legend(markers, self._color_lot.keys(), numpoints=1)
self.fig.is_legend = True
ax.xaxis.label = temp_label
sel_ori = orientations[select_projected_o]
aspect = np.subtract(*ax.get_ylim()) / np.subtract(*ax.get_xlim())
min_axis = 'width' if aspect < 1 else 'height'
# Eli options
ax.quiver(sel_ori[x], sel_ori[y], sel_ori[Gx], sel_ori[Gy],
pivot="tail", scale_units=min_axis, scale=30, color=sel_ori['surface'].map(self._color_lot),
edgecolor='k', headwidth=8, linewidths=1, zorder=102)
try:
ax.legend_.set_frame_on(True)
ax.legend_.set_zorder(10000)
except AttributeError:
pass
def calculate_p1p2(self, direction, cell_number):
if direction == 'y':
cell_number = int(self.model._grid.regular_grid.resolution[1] / 2) if cell_number == 'mid' else cell_number
y = self.model._grid.regular_grid.extent[2] + self.model._grid.regular_grid.dy * cell_number
p1 = [self.model._grid.regular_grid.extent[0], y]
p2 = [self.model._grid.regular_grid.extent[1], y]
elif direction == 'x':
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2) if cell_number == 'mid' else cell_number
x = self.model._grid.regular_grid.extent[0] + self.model._grid.regular_grid.dx * cell_number
p1 = [x, self.model._grid.regular_grid.extent[2]]
p2 = [x, self.model._grid.regular_grid.extent[3]]
else:
raise NotImplementedError
return p1, p2
def _slice_topo_4_sections(self, p1, p2, resx, method='interp2d'):
"""
Slices topography along a set linear section
Args:
:param p1: starting point (x,y) of the section
:param p2: end point (x,y) of the section
:param resx: resolution of the defined section
:param method: interpolation method, 'interp2d' for cubic scipy.interpolate.interp2d
'spline' for scipy.interpolate.RectBivariateSpline
Returns:
:return: returns x,y,z values of the topography along the section
"""
xy = self.model._grid.sections.calculate_line_coordinates_2points(p1, p2, resx)
z = self.model._grid.sections.interpolate_zvals_at_xy(xy, self.model._grid.topography, method)
return xy[:, 0], xy[:, 1], z
def plot_topography(self, ax, fill_contour=False,
contour=True,
section_name=None,
cell_number=None, direction='y', block=None, **kwargs):
hillshade = kwargs.get('hillshade', True)
azdeg = kwargs.get('azdeg', 0)
altdeg = kwargs.get('altdeg', 0)
cmap = kwargs.get('cmap', 'terrain')
self.update_colot_lot()
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None and section_name != 'topography':
p1 = self.model._grid.sections.df.loc[section_name, 'start']
p2 = self.model._grid.sections.df.loc[section_name, 'stop']
x, y, z = self._slice_topo_4_sections(p1, p2, self.model._grid.topography.resolution[0])
pseudo_x = np.linspace(0, self.model._grid.sections.df.loc[section_name, 'dist'], z.shape[0])
a = np.vstack((pseudo_x, z)).T
xy = np.append(a,
([self.model._grid.sections.df.loc[section_name, 'dist'], a[:, 1][-1]],
[self.model._grid.sections.df.loc[section_name, 'dist'],
self.model._grid.regular_grid.extent[5]],
[0, self.model._grid.regular_grid.extent[5]],
[0, a[:, 1][0]])).reshape(-1, 2)
ax.fill(xy[:, 0], xy[:, 1], 'k', zorder=10)
elif section_name == 'topography':
import skimage
from gempy.plot.helpers import add_colorbar
topo = self.model._grid.topography
topo_super_res = skimage.transform.resize(
topo.values_2d,
(1600, 1600),
order=3,
mode='edge',
anti_aliasing=True, preserve_range=False)
values = topo_super_res[:, :, 2].T
if contour is True:
CS = ax.contour(values, extent=(topo.extent[:4]),
colors='k', linestyles='solid', origin='lower')
ax.clabel(CS, inline=1, fontsize=10, fmt='%d')
if fill_contour is True:
CS2 = ax.contourf(values, extent=(topo.extent[:4]), cmap=cmap)
add_colorbar(axes=ax, label='elevation [m]', cs=CS2)
if hillshade is True:
from matplotlib.colors import LightSource
ls = LightSource(azdeg=azdeg, altdeg=altdeg)
hillshade_topography = ls.hillshade(values)
ax.imshow(hillshade_topography, origin='lower', extent=topo.extent[:4], alpha=0.5, zorder=11,
cmap='gray')
elif cell_number is not None or block is not None:
p1, p2 = self.calculate_p1p2(direction, cell_number)
resx = self.model._grid.regular_grid.resolution[0]
resy = self.model._grid.regular_grid.resolution[1]
try:
x, y, z = self._slice_topo_4_sections(p1, p2, resx)
if direction == 'x':
a = np.vstack((y, z)).T
ext = self.model._grid.regular_grid.extent[[2, 3]]
elif direction == 'y':
a = np.vstack((x, z)).T
ext = self.model._grid.regular_grid.extent[[0, 1]]
else:
raise NotImplementedError
a = np.append(a,
([ext[1], a[:, 1][-1]],
[ext[1], self.model._grid.regular_grid.extent[5]],
[ext[0], self.model._grid.regular_grid.extent[5]],
[ext[0], a[:, 1][0]]))
line = a.reshape(-1, 2)
ax.fill(line[:, 0], line[:, 1], color='k')
except IndexError:
                warnings.warn('Topography needs to be a raster to be able to plot it '
'in 2D sections')
return ax
def plot_contacts(self, ax, section_name=None, cell_number=None, direction='y', block=None,
only_faults=False, **kwargs):
self.update_colot_lot()
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if only_faults:
contour_idx = list(self.model._faults.df[self.model._faults.df['isFault'] == True].index)
else:
contour_idx = list(self.model._surfaces.df.index)
extent_val = [*ax.get_xlim(), *ax.get_ylim()]
zorder = kwargs.get('zorder', 100)
if section_name is not None:
if section_name == 'topography':
shape = self.model._grid.topography.resolution
scalar_fields = self.model.solutions.geological_map[1]
c_id = 0 # color id startpoint
for e, block in enumerate(scalar_fields):
level = self.model.solutions.scalar_field_at_surface_points[e][np.where(
self.model.solutions.scalar_field_at_surface_points[e] != 0)]
c_id2 = c_id + len(level) # color id endpoint
ax.contour(block.reshape(shape), 0, levels=np.sort(level),
colors=self.cmap.colors[c_id:c_id2][::-1],
linestyles='solid', origin='lower',
extent=extent_val, zorder=zorder - (e + len(level))
)
c_id = c_id2
else:
l0, l1 = self.model._grid.sections.get_section_args(section_name)
shape = self.model._grid.sections.df.loc[section_name, 'resolution']
scalar_fields = self.model.solutions.sections[1][:, l0:l1]
c_id = 0 # color id startpoint
for e, block in enumerate(scalar_fields):
level = self.model.solutions.scalar_field_at_surface_points[e][np.where(
self.model.solutions.scalar_field_at_surface_points[e] != 0)]
# Ignore warning about some scalars not being on the plot since it is very common
# that an interface does not exit for a given section
c_id2 = c_id + len(level) # color id endpoint
color_list = self.model._surfaces.df.groupby('isActive').get_group(True)['color'][c_id:c_id2][::-1]
ax.contour(block.reshape(shape).T, 0, levels=np.sort(level),
# colors=self.cmap.colors[self.model.surfaces.df['isActive']][c_id:c_id2],
colors=color_list,
linestyles='solid', origin='lower',
extent=extent_val, zorder=zorder - (e + len(level))
)
c_id = c_id2
elif cell_number is not None or block is not None:
_slice = self._slice(direction, cell_number)[:3]
shape = self.model._grid.regular_grid.resolution
c_id = 0 # color id startpoint
for e, block in enumerate(self.model.solutions.scalar_field_matrix):
level = self.model.solutions.scalar_field_at_surface_points[e][np.where(
self.model.solutions.scalar_field_at_surface_points[e] != 0)]
# c_id = e
c_id2 = c_id + len(level)
# print(c_id, c_id2)
color_list = self.model._surfaces.df.groupby('isActive').get_group(True)['color'][c_id:c_id2][::-1]
# print(color_list)
ax.contour(block.reshape(shape)[_slice].T, 0, levels=np.sort(level),
colors=color_list,
linestyles='solid', origin='lower',
extent=extent_val, zorder=zorder - (e + len(level))
)
c_id = c_id2
def plot_section_traces(self, ax, section_names=None, show_data=True, **kwargs):
if section_names is None:
section_names = list(self.model._grid.sections.names)
if show_data:
self.plot_data(ax, section_name='topography', **kwargs)
for section in section_names:
j = np.where(self.model._grid.sections.names == section)[0][0]
x1, y1 = np.asarray(self.model._grid.sections.df.loc[section, 'start'])
x2, y2 = np.asarray(self.model._grid.sections.df.loc[section, 'stop'])
ax.plot([x1, x2], [y1, y2], label=section, linestyle='--')
ax.legend(frameon=True)
def plot_topo_g(self, ax, G, centroids, direction="y",
label_kwargs=None, node_kwargs=None, edge_kwargs=None):
res = self.model._grid.regular_grid.resolution
if direction == "y":
c1, c2 = (0, 2)
e1 = self.model._grid.regular_grid.extent[1] - self.model._grid.regular_grid.extent[0]
e2 = self.model._grid.regular_grid.extent[5] - self.model._grid.regular_grid.extent[4]
d1 = self.model._grid.regular_grid.extent[0]
d2 = self.model._grid.regular_grid.extent[4]
if len(list(centroids.items())[0][1]) == 2:
c1, c2 = (0, 1)
r1 = res[0]
r2 = res[2]
elif direction == "x":
c1, c2 = (1, 2)
e1 = self.model._grid.regular_grid.extent[3] - self.model._grid.regular_grid.extent[2]
e2 = self.model._grid.regular_grid.extent[5] - self.model._grid.regular_grid.extent[4]
d1 = self.model._grid.regular_grid.extent[2]
d2 = self.model._grid.regular_grid.extent[4]
if len(list(centroids.items())[0][1]) == 2:
c1, c2 = (0, 1)
r1 = res[1]
r2 = res[2]
elif direction == "z":
c1, c2 = (0, 1)
e1 = self.model._grid.regular_grid.extent[1] - self.model._grid.regular_grid.extent[0]
e2 = self.model._grid.regular_grid.extent[3] - self.model._grid.regular_grid.extent[2]
d1 = self.model._grid.regular_grid.extent[0]
d2 = self.model._grid.regular_grid.extent[2]
if len(list(centroids.items())[0][1]) == 2:
c1, c2 = (0, 1)
r1 = res[0]
r2 = res[1]
nkw = {
"marker": "o",
"color": "black",
"markersize": 20,
"alpha": 0.75
}
if node_kwargs is not None:
nkw.update(node_kwargs)
tkw = {
"color": "white",
"size": 10,
"ha": "center",
"va": "center",
"weight": "ultralight",
"family": "monospace"
}
if label_kwargs is not None:
tkw.update(label_kwargs)
lkw = {
"linewidth": 0.75,
"color": "black"
}
if edge_kwargs is not None:
lkw.update(edge_kwargs)
for edge in G.edges():
a, b = edge
# plot edges
ax.plot(np.array([centroids[a][c1], centroids[b][c1]]) * e1 / r1 + d1,
np.array([centroids[a][c2], centroids[b][c2]]) * e2 / r2 + d2, **lkw)
for node in G.nodes():
ax.plot(centroids[node][c1] * e1 / r1 + d1, centroids[node][c2] * e2 / r2 + d2,
marker="o", color="black", markersize=10, alpha=0.75)
ax.text(centroids[node][c1] * e1 / r1 + d1,
centroids[node][c2] * e2 / r2 + d2, str(node), **tkw)
def plot_gradient(self, scalar_field, gx, gy, gz, cell_number, quiver_stepsize=5,
# maybe call r sth. like "stepsize"?
direction="y", plot_scalar=True, *args, **kwargs): # include plot data?
"""
Plot the gradient of the scalar field in a given direction.
Args:
geo_data (gempy.DataManagement.InputData): Input data of the model
scalar_field(numpy.array): scalar field to plot with the gradient
gx(numpy.array): gradient in x-direction
gy(numpy.array): gradient in y-direction
gz(numpy.array): gradient in z-direction
cell_number(int): position of the array to plot
quiver_stepsize(int): step size between arrows to indicate gradient
direction(str): xyz. Cartesian direction to be plotted
plot_scalar(bool): boolean to plot scalar field
**kwargs: plt.contour kwargs
Returns:
None
"""
raise NotImplementedError
def _scale_fig_size(figsize, textsize, rows=1, cols=1):
"""Scale figure properties according to rows and cols.
Parameters
----------
figsize : float or None
Size of figure in inches
textsize : float or None
fontsize
rows : int
Number of rows
cols : int
Number of columns
Returns
-------
figsize : float or None
Size of figure in inches
ax_labelsize : int
fontsize for axes label
titlesize : int
fontsize for title
xt_labelsize : int
fontsize for axes ticks
linewidth : int
linewidth
markersize : int
markersize
"""
params = mpl.rcParams
rc_width, rc_height = tuple(params["figure.figsize"])
rc_ax_labelsize = params["axes.labelsize"]
rc_titlesize = params["axes.titlesize"]
rc_xt_labelsize = params["xtick.labelsize"]
rc_linewidth = params["lines.linewidth"]
rc_markersize = params["lines.markersize"]
if isinstance(rc_ax_labelsize, str):
rc_ax_labelsize = 15
if isinstance(rc_titlesize, str):
rc_titlesize = 16
if isinstance(rc_xt_labelsize, str):
rc_xt_labelsize = 14
if figsize is None:
width, height = rc_width, rc_height
sff = 1 if (rows == cols == 1) else 1.2
width = width * cols * sff
height = height * rows * sff
else:
width, height = figsize
if textsize is not None:
scale_factor = textsize / rc_xt_labelsize
elif rows == cols == 1:
scale_factor = ((width * height) / (rc_width * rc_height)) ** 0.5
else:
scale_factor = 1
ax_labelsize = rc_ax_labelsize * scale_factor
titlesize = rc_titlesize * scale_factor
xt_labelsize = rc_xt_labelsize * scale_factor
linewidth = rc_linewidth * scale_factor
markersize = rc_markersize * scale_factor
return (width, height), ax_labelsize, titlesize, xt_labelsize, linewidth, markersize
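# Minimal usage sketch (hypothetical values): the tuple returned by
# _scale_fig_size is typically unpacked and fed back into matplotlib, e.g.
#     (figsize, ax_labelsize, titlesize,
#      xt_labelsize, linewidth, markersize) = _scale_fig_size(None, None, rows=2, cols=2)
#     fig, axes = plt.subplots(2, 2, figsize=figsize)
#     axes[0, 0].set_title("panel", fontsize=titlesize)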
| lgpl-3.0 |
jreback/pandas | pandas/util/_decorators.py | 2 | 17021 | from functools import wraps
import inspect
from textwrap import dedent
from typing import Any, Callable, List, Mapping, Optional, Tuple, Type, Union, cast
import warnings
from pandas._libs.properties import cache_readonly # noqa
from pandas._typing import F
def deprecate(
name: str,
alternative: Callable[..., Any],
version: str,
alt_name: Optional[str] = None,
klass: Optional[Type[Warning]] = None,
stacklevel: int = 2,
msg: Optional[str] = None,
) -> Callable[[F], F]:
"""
Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference of alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated. Use {alt_name} instead.'
"""
alt_name = alt_name or alternative.__name__
klass = klass or FutureWarning
warning_msg = msg or f"{name} is deprecated, use {alt_name} instead"
@wraps(alternative)
def wrapper(*args, **kwargs) -> Callable[..., Any]:
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
# adding deprecated directive to the docstring
msg = msg or f"Use `{alt_name}` instead."
doc_error_msg = (
"deprecate needs a correctly formatted docstring in "
"the target function (should have a one liner short "
"summary, and opening quotes should be in their own "
f"line). Found:\n{alternative.__doc__}"
)
# when python is running in optimized mode (i.e. `-OO`), docstrings are
# removed, so we check that a docstring with correct formatting is used
# but we allow empty docstrings
if alternative.__doc__:
if alternative.__doc__.count("\n") < 3:
raise AssertionError(doc_error_msg)
empty1, summary, empty2, doc = alternative.__doc__.split("\n", 3)
if empty1 or empty2 and not summary:
raise AssertionError(doc_error_msg)
wrapper.__doc__ = dedent(
f"""
{summary.strip()}
.. deprecated:: {version}
{msg}
{dedent(doc)}"""
)
return wrapper
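# Minimal usage sketch (hypothetical names): expose a deprecated alias that
# forwards to a new function and warns on use.
#     def new_func(x):
#         """
#         Return x unchanged.
#
#         Longer description goes here.
#         """
#         return x
#
#     old_func = deprecate("old_func", new_func, "1.0", msg="Use new_func instead.")
#     old_func(1)  # emits a FutureWarning and returns 1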
def deprecate_kwarg(
old_arg_name: str,
new_arg_name: Optional[str],
mapping: Optional[Union[Mapping[Any, Any], Callable[[Any], Any]]] = None,
stacklevel: int = 2,
) -> Callable[[F], F]:
"""
Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: cols is deprecated, use columns instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass do both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: old='yes' is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version. Please take steps to stop the use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version. Please take steps to stop the use of 'cols'
should raise warning
"""
if mapping is not None and not hasattr(mapping, "get") and not callable(mapping):
raise TypeError(
"mapping from old to new argument values must be dict or callable!"
)
def _deprecate_kwarg(func: F) -> F:
@wraps(func)
def wrapper(*args, **kwargs) -> Callable[..., Any]:
old_arg_value = kwargs.pop(old_arg_name, None)
if old_arg_value is not None:
if new_arg_name is None:
msg = (
f"the {repr(old_arg_name)} keyword is deprecated and "
"will be removed in a future version. Please take "
f"steps to stop the use of {repr(old_arg_name)}"
)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
kwargs[old_arg_name] = old_arg_value
return func(*args, **kwargs)
elif mapping is not None:
if callable(mapping):
new_arg_value = mapping(old_arg_value)
else:
new_arg_value = mapping.get(old_arg_value, old_arg_value)
msg = (
f"the {old_arg_name}={repr(old_arg_value)} keyword is "
"deprecated, use "
f"{new_arg_name}={repr(new_arg_value)} instead"
)
else:
new_arg_value = old_arg_value
msg = (
f"the {repr(old_arg_name)}' keyword is deprecated, "
f"use {repr(new_arg_name)} instead"
)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name) is not None:
msg = (
f"Can only specify {repr(old_arg_name)} "
f"or {repr(new_arg_name)}, not both"
)
raise TypeError(msg)
else:
kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
return cast(F, wrapper)
return _deprecate_kwarg
def _format_argument_list(allow_args: Union[List[str], int]):
"""
Convert the allow_args argument (either string or integer) of
`deprecate_nonkeyword_arguments` function to a string describing
it to be inserted into warning message.
Parameters
----------
allow_args : list, tuple or int
The `allowed_args` argument for `deprecate_nonkeyword_arguments`,
but a None value is not allowed.
Returns
-------
s : str
The substring describing the argument list in best way to be
inserted to the warning message.
Examples
--------
`format_argument_list(0)` -> ''
`format_argument_list(1)` -> 'except for the first argument'
`format_argument_list(2)` -> 'except for the first 2 arguments'
`format_argument_list([])` -> ''
`format_argument_list(['a'])` -> "except for the arguments 'a'"
`format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'"
`format_argument_list(['a', 'b', 'c'])` ->
"except for the arguments 'a', 'b' and 'c'"
"""
if not allow_args:
return ""
elif allow_args == 1:
return " except for the first argument"
elif isinstance(allow_args, int):
return f" except for the first {allow_args} arguments"
elif len(allow_args) == 1:
return f" except for the argument '{allow_args[0]}'"
else:
last = allow_args[-1]
args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
return f" except for the arguments {args} and '{last}'"
def deprecate_nonkeyword_arguments(
version: str,
allowed_args: Optional[Union[List[str], int]] = None,
stacklevel: int = 2,
) -> Callable:
"""
Decorator to deprecate a use of non-keyword arguments of a function.
Parameters
----------
version : str
The version in which positional arguments will become
keyword-only.
allowed_args : list or int, optional
In case of list, it must be the list of names of some
first arguments of the decorated functions that are
OK to be given as positional arguments. In case of an
integer, this is the number of positional arguments
that will stay positional. In case of None value,
defaults to list of all arguments not having the
default value.
stacklevel : int, default=2
The stack level for warnings.warn
"""
def decorate(func):
if allowed_args is not None:
allow_args = allowed_args
else:
spec = inspect.getfullargspec(func)
# We must have some defaults if we are deprecating default-less
assert spec.defaults is not None # for mypy
allow_args = spec.args[: -len(spec.defaults)]
@wraps(func)
def wrapper(*args, **kwargs):
arguments = _format_argument_list(allow_args)
if isinstance(allow_args, (list, tuple)):
num_allow_args = len(allow_args)
else:
num_allow_args = allow_args
if len(args) > num_allow_args:
msg = (
f"Starting with Pandas version {version} all arguments of "
f"{func.__name__}{arguments} will be keyword-only"
)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
return func(*args, **kwargs)
return wrapper
return decorate
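# Minimal usage sketch (hypothetical function): positional calls keep working
# but warn that they will become keyword-only.
#     @deprecate_nonkeyword_arguments(version="2.0", allowed_args=["a"])
#     def f(a, b=1, c=2):
#         return a + b + c
#
#     f(1, b=2)  # fine: only the first argument is positional
#     f(1, 2)    # FutureWarning: all arguments of f except for the argument 'a'
#                # will be keyword-only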
def rewrite_axis_style_signature(
name: str, extra_params: List[Tuple[str, Any]]
) -> Callable[..., Any]:
def decorate(func: F) -> F:
@wraps(func)
def wrapper(*args, **kwargs) -> Callable[..., Any]:
return func(*args, **kwargs)
kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
params = [
inspect.Parameter("self", kind),
inspect.Parameter(name, kind, default=None),
inspect.Parameter("index", kind, default=None),
inspect.Parameter("columns", kind, default=None),
inspect.Parameter("axis", kind, default=None),
]
for pname, default in extra_params:
params.append(inspect.Parameter(pname, kind, default=default))
sig = inspect.Signature(params)
# https://github.com/python/typing/issues/598
# error: "F" has no attribute "__signature__"
func.__signature__ = sig # type: ignore[attr-defined]
return cast(F, wrapper)
return decorate
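# Illustrative note (hypothetical caller): for a method decorated as
#     @rewrite_axis_style_signature("mapper", [("copy", True)])
#     def rename(self, *args, **kwargs): ...
# inspect.signature(rename) reports
#     (self, mapper=None, index=None, columns=None, axis=None, copy=True)
# even though the wrapper itself still accepts *args/**kwargs.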
def doc(*docstrings: Union[str, Callable], **params) -> Callable[[F], F]:
"""
A decorator that takes docstring templates, concatenates them, and performs
string substitution on the result.
This decorator will add a variable "_docstring_components" to the wrapped
callable to keep track of the original docstring template for potential usage.
If it should be considered as a template, it will be saved as a string.
Otherwise, it will be saved as a callable, and __doc__ and dedent will be used
later to get the docstring.
Parameters
----------
*docstrings : str or callable
The string / docstring / docstring template to be appended in order
after default docstring under callable.
**params
The string which would be used to format docstring template.
"""
def decorator(decorated: F) -> F:
# collecting docstring and docstring templates
docstring_components: List[Union[str, Callable]] = []
if decorated.__doc__:
docstring_components.append(dedent(decorated.__doc__))
for docstring in docstrings:
if hasattr(docstring, "_docstring_components"):
# error: Item "str" of "Union[str, Callable[..., Any]]" has no
# attribute "_docstring_components" [union-attr]
# error: Item "function" of "Union[str, Callable[..., Any]]"
# has no attribute "_docstring_components" [union-attr]
docstring_components.extend(
docstring._docstring_components # type: ignore[union-attr]
)
elif isinstance(docstring, str) or docstring.__doc__:
docstring_components.append(docstring)
# formatting templates and concatenating docstring
decorated.__doc__ = "".join(
[
component.format(**params)
if isinstance(component, str)
else dedent(component.__doc__ or "")
for component in docstring_components
]
)
# error: "F" has no attribute "_docstring_components"
decorated._docstring_components = ( # type: ignore[attr-defined]
docstring_components
)
return decorated
return decorator
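# Minimal usage sketch (hypothetical template): share one docstring template
# between two functions and substitute a name into it.
#     @doc(klass="Series")
#     def ser_method(self):
#         """Apply to a {klass}."""
#
#     @doc(ser_method, klass="DataFrame")
#     def df_method(self):
#         ...
#
#     # ser_method.__doc__ -> "Apply to a Series."
#     # df_method.__doc__  -> "Apply to a DataFrame."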
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module https://matplotlib.org/users/license.html
class Substitution:
"""
A decorator to take a function's docstring and perform string
substitution on it.
This decorator should be robust even if func.__doc__ is None
(for example, if -OO was passed to the interpreter)
Usage: construct a docstring.Substitution with a sequence or
dictionary suitable for performing substitution; then
decorate a suitable function with the constructed object. e.g.
sub_author_name = Substitution(author='Jason')
@sub_author_name
def some_function(x):
"%(author)s wrote this function"
# note that some_function.__doc__ is now "Jason wrote this function"
One can also use positional arguments.
sub_first_last_names = Substitution('Edgar Allen', 'Poe')
@sub_first_last_names
def some_function(x):
"%s %s wrote the Raven"
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise AssertionError("Only positional or keyword args are allowed")
self.params = args or kwargs
def __call__(self, func: F) -> F:
func.__doc__ = func.__doc__ and func.__doc__ % self.params
return func
def update(self, *args, **kwargs) -> None:
"""
Update self.params with supplied args.
"""
if isinstance(self.params, dict):
self.params.update(*args, **kwargs)
class Appender:
"""
A function decorator that will append an addendum to the docstring
of the target function.
This decorator should be robust even if func.__doc__ is None
(for example, if -OO was passed to the interpreter).
Usage: construct a docstring.Appender with a string to be joined to
the original docstring. An optional 'join' parameter may be supplied
which will be used to join the docstring and addendum. e.g.
add_copyright = Appender("Copyright (c) 2009", join='\n')
@add_copyright
def my_dog(has='fleas'):
"This docstring will have a copyright below"
pass
"""
addendum: Optional[str]
def __init__(self, addendum: Optional[str], join: str = "", indents: int = 0):
if indents > 0:
self.addendum = indent(addendum, indents=indents)
else:
self.addendum = addendum
self.join = join
def __call__(self, func: F) -> F:
func.__doc__ = func.__doc__ if func.__doc__ else ""
self.addendum = self.addendum if self.addendum else ""
docitems = [func.__doc__, self.addendum]
func.__doc__ = dedent(self.join.join(docitems))
return func
def indent(text: Optional[str], indents: int = 1) -> str:
if not text or not isinstance(text, str):
return ""
jointext = "".join(["\n"] + [" "] * indents)
return jointext.join(text.split("\n"))
| bsd-3-clause |
moonbury/notebooks | github/MasteringMLWithScikit-learn/8365OS_02_Codes/scratch.py | 3 | 3078 | """
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.preprocessing import PolynomialFeatures
>>> X_train = [[6], [8], [10], [14], [18]]
>>> y_train = [[7], [9], [13], [17.5], [18]]
>>> X_test = [[6], [8], [11], [16]]
>>> y_test = [[8], [12], [15], [18]]
>>> regressor = LinearRegression()
>>> regressor.fit(X_train, y_train)
>>> xx = np.linspace(0, 26, 100)
>>> yy = regressor.predict(xx.reshape(xx.shape[0], 1))
>>> plt.plot(xx, yy)
>>> quadratic_featurizer = PolynomialFeatures(degree=2)
>>> X_train_quadratic = quadratic_featurizer.fit_transform(X_train)
>>> X_test_quadratic = quadratic_featurizer.transform(X_test)
>>> regressor_quadratic = LinearRegression()
>>> regressor_quadratic.fit(X_train_quadratic, y_train)
>>> xx_quadratic = quadratic_featurizer.transform(xx.reshape(xx.shape[0], 1))
>>> plt.plot(xx, regressor_quadratic.predict(xx_quadratic), c='r', linestyle='--')
>>> plt.title('Pizza price regressed on diameter')
>>> plt.xlabel('Diameter in inches')
>>> plt.ylabel('Price in dollars')
>>> plt.axis([0, 25, 0, 25])
>>> plt.grid(True)
>>> plt.scatter(X_train, y_train)
>>> plt.show()
>>> print X_train
>>> print X_train_quadratic
>>> print X_test
>>> print X_test_quadratic
>>> print 'Simple linear regression r-squared', regressor.score(X_test, y_test)
>>> print 'Quadratic regression r-squared', regressor_quadratic.score(X_test_quadratic, y_test)
[[6], [8], [10], [14], [18]]
[[ 1 6 36]
[ 1 8 64]
[ 1 10 100]
[ 1 14 196]
[ 1 18 324]]
[[6], [8], [11], [16]]
[[ 1 6 36]
[ 1 8 64]
[ 1 11 121]
[ 1 16 256]]
Simple linear regression r-squared 0.809726797708
Quadratic regression r-squared 0.867544365635
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFeatures
X_train = [[6], [8], [10], [14], [18]]
y_train = [[7], [9], [13], [17.5], [18]]
X_test = [[6], [8], [11], [16]]
y_test = [[8], [12], [15], [18]]
regressor = LinearRegression()
regressor.fit(X_train, y_train)
xx = np.linspace(0, 26, 100)
yy = regressor.predict(xx.reshape(xx.shape[0], 1))
plt.plot(xx, yy)
quadratic_featurizer = PolynomialFeatures(degree=3)
X_train_quadratic = quadratic_featurizer.fit_transform(X_train)
X_test_quadratic = quadratic_featurizer.transform(X_test)
regressor_quadratic = Ridge(alpha=100)
regressor_quadratic.fit(X_train_quadratic, y_train)
xx_quadratic = quadratic_featurizer.transform(xx.reshape(xx.shape[0], 1))
plt.plot(xx, regressor_quadratic.predict(xx_quadratic), c='r', linestyle='--')
plt.title('Pizza price regressed on diameter')
plt.xlabel('Diameter in inches')
plt.ylabel('Price in dollars')
plt.axis([0, 25, 0, 25])
plt.grid(True)
plt.scatter(X_train, y_train)
plt.show()
print X_train
print X_train_quadratic
print X_test
print X_test_quadratic
print 'Simple linear regression r-squared', regressor.score(X_test, y_test)
print 'Quadratic regression r-squared', regressor_quadratic.score(X_test_quadratic, y_test)
| gpl-3.0 |
ryanbaumann/Pandas-to_sql-upsert | to_sql_newrows.py | 1 | 7603 | import os
import sys
import time
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import threading
from timeit import default_timer as timer
os.path.dirname(os.path.abspath(__file__))
def clean_df_db_dups(df, tablename, engine, dup_cols=[],
filter_continuous_col=None, filter_categorical_col=None):
"""
Remove rows from a dataframe that already exist in a database
Required:
df : dataframe to remove duplicate rows from
engine: SQLAlchemy engine object
tablename: tablename to check duplicates in
dup_cols: list or tuple of column names to check for duplicate row values
Optional:
filter_continuous_col: the name of the continuous data column for a BETWEEN min/max filter
can be either a datetime, int, or float data type
useful for restricting the database table size to check
filter_categorical_col : the name of the categorical data column for Where = value check
Creates an "IN ()" check on the unique values in this column
Returns
Unique list of values from dataframe compared to database table
"""
args = 'SELECT %s FROM %s' %(', '.join(['"{0}"'.format(col) for col in dup_cols]), tablename)
args_contin_filter, args_cat_filter = None, None
if filter_continuous_col is not None:
if df[filter_continuous_col].dtype == 'datetime64[ns]':
args_contin_filter = """ "%s" BETWEEN Convert(datetime, '%s')
AND Convert(datetime, '%s')""" %(filter_continuous_col,
df[filter_continuous_col].min(), df[filter_continuous_col].max())
if filter_categorical_col is not None:
args_cat_filter = ' "%s" in(%s)' %(filter_categorical_col,
', '.join(["'{0}'".format(value) for value in df[filter_categorical_col].unique()]))
if args_contin_filter and args_cat_filter:
args += ' Where ' + args_contin_filter + ' AND' + args_cat_filter
elif args_contin_filter:
args += ' Where ' + args_contin_filter
elif args_cat_filter:
args += ' Where ' + args_cat_filter
df.drop_duplicates(dup_cols, keep='last', inplace=True)
df = pd.merge(df, pd.read_sql(args, engine), how='left', on=dup_cols, indicator=True)
df = df[df['_merge'] == 'left_only']
df.drop(['_merge'], axis=1, inplace=True)
return df
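# Illustrative example (hypothetical names): with tablename='trades',
# dup_cols=['A', 'B'] and filter_categorical_col='C', the helper issues roughly
#     SELECT "A", "B" FROM trades Where "C" in('x', 'y')
# and then left-merges df against that result, keeping only rows whose
# ('A', 'B') combination is not already stored in the database.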
def to_sql_newrows(df, pool_size, *args, **kargs):
"""
Extend the Python pandas to_sql() method to thread database insertion
Required:
df : pandas dataframe to insert new rows into a database table
POOL_SIZE : your sqlalchemy max connection pool size. Set < your db connection limit.
Example where this matters: your cloud DB has a connection limit.
*args:
Pandas to_sql() arguments.
Required arguments are:
tablename : Database table name to write results to
engine : SqlAlchemy engine
Optional arguments are:
'if_exists' : 'append' or 'replace'. If table already exists, use append.
'index' : True or False. True if you want to write index values to the db.
Credits for initial threading code:
http://techyoubaji.blogspot.com/2015/10/speed-up-pandas-tosql-with.html
"""
CHUNKSIZE = 1000
INITIAL_CHUNK = 100
if len(df) > CHUNKSIZE:
#write the initial chunk to the database if df is bigger than chunksize
df.iloc[:INITIAL_CHUNK, :].to_sql(*args, **kargs)
else:
#if df is smaller than chunksize, just write it to the db now
df.to_sql(*args, **kargs)
workers, i = [], 0
for i in range((df.shape[0] - INITIAL_CHUNK)/CHUNKSIZE):
t = threading.Thread(target=lambda: df.iloc[INITIAL_CHUNK+i*CHUNKSIZE:INITIAL_CHUNK+(i+1)*CHUNKSIZE].to_sql(*args, **kargs))
t.start()
workers.append(t)
df.iloc[INITIAL_CHUNK+(i+1)*CHUNKSIZE:, :].to_sql(*args, **kargs)
[t.join() for t in workers]
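# Minimal usage sketch (hypothetical engine/table), typically run after
# clean_df_db_dups so the threaded inserts do not hit primary-key conflicts:
#     to_sql_newrows(df, POOL_SIZE, 'test_upsert', ENGINE,
#                    if_exists='append', index=False)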
def setup(engine, tablename):
engine.execute("""DROP TABLE IF EXISTS "%s" """ % (tablename))
engine.execute("""CREATE TABLE "%s" (
"A" INTEGER,
"B" INTEGER,
"C" INTEGER,
"D" INTEGER,
CONSTRAINT pk_A_B PRIMARY KEY ("A","B"))
""" % (tablename))
if __name__ == '__main__':
DB_TYPE = 'postgresql'
DB_DRIVER = 'psycopg2'
DB_USER = 'admin'
DB_PASS = 'password'
DB_HOST = 'localhost'
DB_PORT = '5432'
DB_NAME = 'pandas_upsert'
POOL_SIZE = 50
TABLENAME = 'test_upsert'
SQLALCHEMY_DATABASE_URI = '%s+%s://%s:%s@%s:%s/%s' % (DB_TYPE, DB_DRIVER, DB_USER,
DB_PASS, DB_HOST, DB_PORT, DB_NAME)
ENGINE = create_engine(
SQLALCHEMY_DATABASE_URI, pool_size=POOL_SIZE, max_overflow=0)
print 'setting up db'
setup(ENGINE, TABLENAME)
try:
i=0
prev = timer()
start = timer()
for i in range(10):
print 'running test %s' %(str(i))
df = pd.DataFrame(
np.random.randint(0, 500, size=(100000, 4)), columns=list('ABCD'))
df = clean_df_db_dups(df, TABLENAME, ENGINE, dup_cols=['A', 'B'])
print 'row count after drop db duplicates is now : %s' %(df.shape[0])
df.to_sql(TABLENAME, ENGINE, if_exists='append', index=False)
end = timer()
elapsed_time = end - prev
prev = timer()
print 'completed loop in %s sec!' %(elapsed_time)
i += 1
end = timer()
elapsed_time = end - start
print 'completed singlethread insert loops in %s sec!' %(elapsed_time)
inserted = pd.read_sql('SELECT count("A") from %s' %(TABLENAME), ENGINE)
print 'inserted %s new rows into database!' %(inserted.iloc[0]['count'])
print '\n setting up db'
setup(ENGINE, TABLENAME)
print '\n'
i=0
prev = timer()
start = timer()
for i in range(10):
print 'running test %s' %(str(i))
df = pd.DataFrame(
np.random.randint(0, 500, size=(100000, 4)), columns=list('ABCD'))
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
df.to_sql('temp', ENGINE, if_exists='replace', index=False)
connection = ENGINE.connect()
args1 = """ INSERT INTO "test_upsert"
SELECT * FROM
(SELECT a.*
FROM "temp" a LEFT OUTER JOIN "test_upsert" b
ON (a."A" = b."A" and a."B"=b."B")
WHERE b."A" is null) b"""
result = connection.execute(args1)
args2 = """ DROP Table If Exists "temp" """
connection.execute(args2)
connection.close()
end = timer()
elapsed_time = end - prev
prev = timer()
print 'completed loop in %s sec!' %(elapsed_time)
i += 1
end = timer()
elapsed_time = end - start
print 'completed staging insert loops in %s sec!' %(elapsed_time)
inserted = pd.read_sql('SELECT count("A") from %s' %(TABLENAME), ENGINE)
print 'inserted %s new rows into database!' %(inserted.iloc[0]['count'])
except KeyboardInterrupt:
print("Interrupted... exiting...")
| mit |
wavelets/hyperopt-sklearn | hpsklearn/vkmeans.py | 6 | 2032 | import numpy as np
from sklearn.cluster import KMeans
class ColumnKMeans(object):
def __init__(self,
n_clusters,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-4,
precompute_distances=True,
verbose=0,
random_state=None,
copy_x=True,
n_jobs=1,
):
self.n_clusters = n_clusters
self.init = init
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.output_dtype = None
def fit(self, X):
rows, cols = X.shape
self.col_models = []
for jj in range(cols):
col_model=KMeans(
n_clusters=self.n_clusters,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
tol=self.tol,
precompute_distances=self.precompute_distances,
verbose=self.verbose,
random_state=self.random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
)
col_model.fit(X[:, jj:jj + 1])
self.col_models.append(col_model)
def transform(self, X):
rows, cols = X.shape
if self.output_dtype is None:
output_dtype = X.dtype # XXX
else:
output_dtype = self.output_dtype
rval = np.empty(
(rows, cols, self.n_clusters),
dtype=output_dtype)
for jj in range(cols):
Xj = X[:, jj:jj + 1]
dists = self.col_models[jj].transform(Xj)
feats = np.exp(-(dists ** 2))
# -- normalize features by row
rval[:, jj, :] = feats / (feats.sum(axis=1)[:, None])
assert np.all(np.isfinite(rval))
return rval.reshape((rows, cols * self.n_clusters))
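# Minimal demo (random data), only run when this module is executed directly:
# each input column is expanded into n_clusters soft membership features,
# so a (20, 3) matrix becomes (20, 12) with n_clusters=4.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 3)
    ckm = ColumnKMeans(n_clusters=4)
    ckm.fit(X_demo)
    print(ckm.transform(X_demo).shape)  # expected: (20, 12)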
| bsd-3-clause |
kgullikson88/GSSP_Analyzer | gsspy/analyzer.py | 1 | 5730 | from __future__ import print_function, division, absolute_import
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import logging
from ._utils import get_minimum
# Default labels for the Chi^2 output table
CHI2_LABELS = ['feh', 'Teff', 'logg', 'micro_turb', 'vsini',
'chi2_inter', 'contin_factor', 'chi2', 'chi2_1sig']
# Which labels are parameters (again, default)
PAR_LABELS = ['feh', 'Teff', 'logg', 'micro_turb', 'vsini', 'dilution']
class GSSP_Analyzer(object):
def __init__(self, basedir, chi2_labels=None, par_labels=None):
"""
Analyze the output of a GSSP_single run.
Parameters:
===========
basedir: string
The name of the GSSP output directory.
chi2_labels: iterable, optional
Labels to apply to the columns in the 'Chi2_table.dat',
which is found in basedir
par_labels: iterable, optional
The names of the parameters that were fit. This is
mostly the same as chi2_labels, but without the chi^2
columns
"""
if chi2_labels is None:
chi2_labels = CHI2_LABELS
if par_labels is None:
par_labels = PAR_LABELS
fname = os.path.join(basedir, 'Chi2_table.dat')
try:
df = pd.read_fwf(fname, header=None, names=chi2_labels)
except IOError as e:
logging.warning('File {} not found!'.format(fname))
raise e
self.chi2_labels = chi2_labels
self.par_labels = par_labels
self.chi2_df = df
self.basedir = basedir
return
def estimate_best_parameters(self):
"""
Estimate the best parameters by interpolating the grid
Returns:
=========
pd.Series object with the best parameter and associated uncertainties
for each parameter
A tuple of matplotlib.Figure instances with plots for each parameter.
"""
best_grid_pars = self._get_best_grid_pars()
parameters = [p for p in self.par_labels if p in self.chi2_df.columns]
figures = {}
for i, par in enumerate(parameters):
logging.debug('Slicing to find best {}'.format(par))
# Get all the other parameters
other_pars = [p for p in parameters if p != par]
# Get the chi^2 dependence on the current parameter alone
cond = np.all([self.chi2_df[p] == best_grid_pars[p] for p in other_pars], axis=0)
par_dependence = self.chi2_df[cond][[par, 'chi2']]
if len(par_dependence) < 2:
continue
logging.debug(par_dependence)
# Fit the dependence to a polynomial
polypars = np.polyfit(par_dependence[par],
par_dependence['chi2']-best_grid_pars['chi2_1sig'],
2)
chi2_fcn = np.poly1d(polypars)
roots = sorted(np.roots(polypars))
minimum = get_minimum(chi2_fcn, search_range=roots)
if len(minimum) == 1:
minimum = minimum[0]
elif len(minimum) > 1:
chi2_vals = chi2_fcn(minimum)
minimum = minimum[np.argmin(chi2_vals)]
else:
minimum = par_dependence.sort_values(by='chi2')[par].values[0]
# Plot
fig, ax = plt.subplots(1, 1)
ax.scatter(par_dependence[par], par_dependence['chi2'],
marker='x', color='red')
ax.scatter(minimum, chi2_fcn(minimum) + best_grid_pars['chi2_1sig'],
marker='o', color='blue')
x = np.linspace(par_dependence[par].min(), par_dependence[par].max(), 25)
ax.plot(x, chi2_fcn(x) + best_grid_pars['chi2_1sig'], 'g--')
ax.set_xlabel(par)
ax.set_ylabel('$\chi^2$')
# Save the best_parameters
best_grid_pars['best_{}'.format(par)] = minimum
best_grid_pars['1sig_CI_lower_{}'.format(par)] = min(roots)
best_grid_pars['1sig_CI_upper_{}'.format(par)] = max(roots)
figures[par] = fig
return best_grid_pars, figures
def plot_best_model(self):
""" Plot the observed spectrum with the best model
"""
obs_fname = os.path.join(self.basedir, 'Observed_spectrum.dat')
model_fname = os.path.join(self.basedir, 'Synthetic_best_fit.rgs')
obs_spec = np.loadtxt(obs_fname, unpack=True)
model_spec = np.loadtxt(model_fname, usecols=(0,1), unpack=True)
fig, ax = plt.subplots(1, 1, figsize=(12,7))
ax.plot(obs_spec[0], obs_spec[1], 'k-', alpha=0.7, label='Observed spectrum')
ax.plot(model_spec[0], model_spec[1], 'r-', alpha=0.8, label='Model Spectrum')
ax.set_xlabel('Wavelength ($\AA$)')
ax.set_ylabel('Normalized Flux')
leg = ax.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.show()
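    # Minimal usage sketch (hypothetical output directory):
    #     analyzer = GSSP_Analyzer('gssp_output/')
    #     best_pars, figures = analyzer.estimate_best_parameters()
    #     print(best_pars.filter(like='best_'))
    #     analyzer.plot_best_model()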
def _get_best_grid_pars(self):
"""
Finds the best set of parameters (lowest chi2) within the grid
The parameters to search are given in self.par_labels as an iterable
"""
best_row = self.chi2_df.sort_values('chi2', ascending=True).iloc[0]
best_pars = {}
for par in self.par_labels:
if par in best_row:
best_pars[par] = best_row[par]
# Add the chi^2 information
best_pars['chi2'] = best_row['chi2']
best_pars['chi2_1sig'] = best_row['chi2_1sig']
return pd.Series(data=best_pars) | mit |
apache/spark | python/pyspark/sql/pandas/group_ops.py | 23 | 14683 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
class PandasGroupedOpsMixin(object):
"""
Mix-in for pandas grouped operations. Currently, only :class:`GroupedData`
can use this class.
"""
def apply(self, udf):
"""
It is an alias of :meth:`pyspark.sql.GroupedData.applyInPandas`; however, it takes a
:meth:`pyspark.sql.functions.pandas_udf` whereas
:meth:`pyspark.sql.GroupedData.applyInPandas` takes a Python native function.
.. versionadded:: 2.3.0
Parameters
----------
udf : :func:`pyspark.sql.functions.pandas_udf`
a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
Notes
-----
It is preferred to use :meth:`pyspark.sql.GroupedData.applyInPandas` over this
API. This API will be deprecated in the future releases.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
See Also
--------
pyspark.sql.functions.pandas_udf
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
warnings.warn(
"It is preferred to use 'applyInPandas' over this "
"API. This API will be deprecated in the future releases. See SPARK-28264 for "
"more details.", UserWarning)
return self.applyInPandas(udf.func, schema=udf.returnType)
def applyInPandas(self, func, schema):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes a `pandas.DataFrame`, and outputs a
`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, ceil
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").applyInPandas(
... normalize, schema="id long, v double").show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can pass a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def mean_func(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').applyInPandas(
... mean_func, schema="id long, v double").show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> def sum_func(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).applyInPandas(
... sum_func, schema="id long, `ceil(v / 2)` long, v double").show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
Notes
-----
This function requires a full shuffle. All the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
This API is experimental.
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import GroupedData
from pyspark.sql.functions import pandas_udf, PandasUDFType
assert isinstance(self, GroupedData)
udf = pandas_udf(
func, returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def cogroup(self, other):
"""
Cogroups this group with another group so that we can run cogrouped operations.
.. versionadded:: 3.0.0
See :class:`PandasCogroupedOps` for the operations that can be run.
"""
from pyspark.sql import GroupedData
assert isinstance(self, GroupedData)
return PandasCogroupedOps(self, other)
class PandasCogroupedOps(object):
"""
A logical grouping of two :class:`GroupedData`,
created by :func:`GroupedData.cogroup`.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental.
"""
def __init__(self, gd1, gd2):
self._gd1 = gd1
self._gd2 = gd2
self.sql_ctx = gd1.sql_ctx
def applyInPandas(self, func, schema):
"""
Applies a function to each cogroup using pandas and returns the result
as a `DataFrame`.
The function should take two `pandas.DataFrame`\\s and return another
`pandas.DataFrame`. For each side of the cogroup, all columns are passed together as a
`pandas.DataFrame` to the user-function and the returned `pandas.DataFrame` are combined as
a :class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes two `pandas.DataFrame`\\s, and
outputs a `pandas.DataFrame`, or that takes one tuple (grouping keys) and two
pandas ``DataFrame``\\s, and outputs a pandas ``DataFrame``.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df1 = spark.createDataFrame(
... [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
... ("time", "id", "v1"))
>>> df2 = spark.createDataFrame(
... [(20000101, 1, "x"), (20000101, 2, "y")],
... ("time", "id", "v2"))
>>> def asof_join(l, r):
... return pd.merge_asof(l, r, on="time", by="id")
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, schema="time int, id int, v1 double, v2 string"
... ).show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
|20000101| 2|2.0| y|
|20000102| 2|4.0| y|
+--------+---+---+---+
Alternatively, the user can define a function that takes three arguments. In this case,
the grouping key(s) will be passed as the first argument and the data will be passed as the
second and third arguments. The grouping key(s) will be passed as a tuple of numpy data
types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as two
`pandas.DataFrame` containing all columns from the original Spark DataFrames.
>>> def asof_join(k, l, r):
... if k == (1,):
... return pd.merge_asof(l, r, on="time", by="id")
... else:
... return pd.DataFrame(columns=['time', 'id', 'v1', 'v2'])
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, "time int, id int, v1 double, v2 string").show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
+--------+---+---+---+
Notes
-----
This function requires a full shuffle. All the data of a cogroup will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
This API is experimental.
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql.pandas.functions import pandas_udf
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF)
all_cols = self._extract_cols(self._gd1) + self._extract_cols(self._gd2)
udf_column = udf(*all_cols)
jdf = self._gd1._jgd.flatMapCoGroupsInPandas(self._gd2._jgd, udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
@staticmethod
def _extract_cols(gd):
df = gd._df
return [df[col] for col in df.columns]
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.group_ops
globs = pyspark.sql.pandas.group_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.group tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.group_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
hlin117/scikit-learn | examples/classification/plot_lda_qda.py | 32 | 5381 | """
====================================================================
Linear and Quadratic Discriminant Analysis with covariance ellipsoid
====================================================================
This example plots the covariance ellipsoids of each class and
decision boundary learned by LDA and QDA. The ellipsoids display
the double standard deviation for each class. With LDA, the
standard deviation is the same for all the classes, while each
class has its own standard deviation with QDA.
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
alpha = 0.5
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha,
color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha,
color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha,
color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha,
color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color, edgecolor='yellow',
linewidth=2, zorder=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariances=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
dspaccapeli/bus-arrival | visualization/plot_distribution.py | 1 | 5860 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Description:
Plot the delay and pause distribution
eliminating the outliers in pandas
to visualize a normalized and
understandable/workable plot.
@author: dspaccapeli
"""
#imports to manage the sql db
import sqlite3 as lite
import pandas as pd
#to make the plot show-up from command line
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#connect to the database
db_connection = lite.connect('DATABASE_PATH')
#open the cursor to start querying the database - read ops
read_curs = db_connection.cursor()
route_id = 2550
#select all infos for stop equals _n_
df = pd.read_sql_query("SELECT * FROM hsl WHERE route_id=%s" % (route_id), db_connection)
#select column to plot as series
delay = df['delay']
pause = df['pause']
#declare figure to show
plt.figure(1)
#start delay plot
plt.subplot(211)
#-----------------------------------------------------------------------------#
# OUTLIER DETECTION #
#                note that subtracting the mean centres the data               #
# OLD AND PROBABLY HAS ERRORS #
#-----------------------------------------------------------------------------#
#consider all the points that go further than 3.5 std from the mean as outliers
d_outlier = delay[~((delay-delay.mean()).abs()>3*delay.std())]
#d_outlier.rename('DELAY FOR ROUTE_ID = %s, NO OUTLIERS' % route_id);
"""
# ALTERNATIVE METHOD
#-----------------------------------------------------------------------------#
#                                 TUKEY'S TEST                                  #
# https://www.jstor.org/stable/2289073?seq=8#page_scan_tab_contents #
# (!) w/ k=2.28 & formula~ [q_1 -k(q_3 -q_1), q_3 +k(q_3 -q_1)] #
#-----------------------------------------------------------------------------#
k = 1.5
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#--------------------------- DELAY -----------------------------------#
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
q_1 = delay.quantile(q=0.25)
q_3 = delay.quantile(q=0.75)
mean = delay.mean()
print "delay q_1 is " + str(q_1)
print "delay q_3 is " + str(q_3)
print "delay mean is " + str(mean)
#remove the data that is out of the Tukey's range
d_outlier = delay[~(delay-mean <= q_1-k*(q_3-q_1))]
print "d_outlier size pre-skim: " + str(d_outlier.size)
d_outlier = d_outlier[~(d_outlier-mean >= q_3+k*(q_3-q_1))]
print "d_outlier size post-skim: " + str(d_outlier.size)
"""
#plot as a distribution
#d_outlier.plot(kind='kde', title="DELAY FOR ROUTE_ID = %s" % route_id)
#sns.distplot(d_outlier, rug=True, hist=False);
plt.hist(d_outlier, bins=30, histtype='step')
plt.title('Delay distribution for route_id = %s' % route_id)
#show the median and mean on the plot
plt.axvline(d_outlier.mean(), color='k', linestyle='solid')
plt.axvline(d_outlier.median(), color='r', linestyle='dashed')
#display the legend for subplot(1)
MN = mpatches.Patch(color='black', label='Mean')
MD = mpatches.Patch(color='red', label='Median')
plt.legend(handles=[MN, MD], loc='upper right')
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#--------------------------- PAUSE -----------------------------------#
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#start pause plot
plt.subplot(212)
#-----------------------------------------------------------------------------#
# OUTLIER DETECTION #
#                note that subtracting the mean centres the data               #
# OLD AND PROBABLY HAS INCONSISTENCIES #
#-----------------------------------------------------------------------------#
p_outlier = pause[~((pause-pause.mean()).abs()>3*pause.std())]
#pause.plot(kind='kde', title="DELAY FOR STOP_ID = %s" % stop_id)
"""
# AGAIN ALTERNATIVE METHOD
q_1 = pause.quantile(q=0.25)
q_3 = pause.quantile(q=0.75)
mean = pause.mean()
print "pause q_1 is " + str(q_1)
print "pause q_3 is " + str(q_3)
print "pause mean is " + str(mean)
p_outlier = pause[~(pause-mean <= q_1-k*(q_3-q_1))]
print "p_outlier size pre-skim: " + str(p_outlier.size)
p_outlier = p_outlier[~(p_outlier-mean >= q_3+k*(q_3-q_1))]
print "p_outlier size post-skim: " + str(p_outlier.size)
#plot as a distribution
p_outlier.plot(kind='kde', title="PAUSE FOR STOP_ID = 1204101")
#show the median and mean on the plot
plt.axvline(p_outlier.mean(), color='k', linestyle='solid')
plt.axvline(p_outlier.median(), color='r', linestyle='dashed')
print(p_outlier.max())
print(p_outlier.count())
"""
#display the legend for subplot(2)
#PROBABLY REDUNDANT, you could use the old one
#MN_P = mpatches.Patch(color='black', label='Mean')
#MD_P = mpatches.Patch(color='red', label='Median')
#delay.rename("DELAY FOR ROUTE_ID = %s" % route_id);
#p_outlier.plot(kind='kde', title="PAUSE FOR ROUTE_ID = %s" % route_id)
#sns.distplot(delay, rug=True, hist=False);
plt.hist(p_outlier, bins=30, histtype='step')
plt.title('Pause distribution for route_id = %s' % route_id)
plt.legend(handles=[MN, MD], loc='upper right')
#show the median and mean on the plot
plt.axvline(p_outlier.mean(), color='k', linestyle='solid')
plt.axvline(p_outlier.median(), color='r', linestyle='dashed')
#let it show
plt.show()
| gpl-3.0 |
mojoboss/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
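    # Written out, this is the Laplace-smoothed Bernoulli estimate with alpha=1:
    #     P(x_j = 1 | y = c) = (N_jc + 1) / (N_c + 2)
    # i.e. feature_log_prob_ = log(feature_count_ + 1) - log(class_count_ + 2)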
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
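    # Hand computation of the two unnormalised scores above (class prior times
    # P(f|c) for each present feature and 1 - P(f|c) for each absent one):
    #   China: 0.75 * 0.6 * 0.8 * 0.2 * 0.6 * 0.6 * 0.2 = 0.005184
    #   Japan: 0.25 * (2/3)**6 = 16/729 ~ 0.0219479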
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
WangWenjun559/Weiss | summary/sumy/sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
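# For n_max_train_sizes = 20 the mock's scores are linear in the train size n:
# the training score is 2 - n/20 (1.9 at n=2 down to 1.0 at n=20) and the test
# score is n/20 (0.1 up to 1.0), which is what the np.linspace(1.9, 1.0, 10)
# and np.linspace(0.1, 1.0, 10) assertions in the tests below check.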
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
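# With this mock, validation_curve should recover the parameter directly: the
# mean training score equals param and the mean test score equals 1 - param
# for every value in param_range (see test_validation_curve below).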
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| apache-2.0 |
rohanp/scikit-learn | sklearn/feature_extraction/hashing.py | 41 | 6175 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
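# A minimal usage sketch for the "string" input type (illustrative only, not
# part of the module; the feature names and n_features value are arbitrary):
#
#     from sklearn.feature_extraction import FeatureHasher
#     h = FeatureHasher(n_features=8, input_type="string")
#     X = h.transform([["cat", "dog", "cat"], ["fish"]])  # each string implies value 1
#     X.toarray()  # dense view of the (2, 8) hashed count matrix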
| bsd-3-clause |
vsmolyakov/cv | visual_words/visual_words.py | 1 | 9664 | import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.neighbors import KDTree
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA
from time import time
class visual_words:
def __init__(self):
pass
def plot_images(n_rows, n_cols, images):
f = plt.figure()
for i, image in enumerate(images):
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(images[i], cmap = plt.cm.gray)
plt.xticks([])
plt.yticks([])
plt.show()
#f.savefig('./figures/knn_faces.png')
np.random.seed(0)
if __name__ == "__main__":
#Overview:
#Olivetti dataset
#Split into test and training
#extract keypoints and compute sift features on training images
#cluster sift features into a visual dictionary of size V
#represent each image as visual words histogram
#apply tf-idf (need text data)
#fit LDA topic model on bags of visual words
#given test data transform test image into tf_idf vector
#use cosine similarity for image retrieval
#display top-K images
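    #Shapes produced by the pipeline below (summary, for orientation):
    #  des_tot:          (total_keypoints, 128)  stacked dense-SIFT descriptors
    #  cluster_centers_: (dictionary_size, 128)  the visual-word dictionary
    #  A:                (dictionary_size, num_train_images) visual-word counts
    #  H:                (num_train_images, num_topics) LDA topic proportions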
# Load the faces datasets
data = fetch_olivetti_faces(shuffle=True, random_state=0)
targets = data.target
data = data.images.reshape((len(data.images), -1))
data_train = data[targets < 30]
data_test = data[targets >= 30]
num_train_images = data_train.shape[0]
#show mean training image
plt.figure()
plt.imshow(np.mean(data_train,axis=0).reshape(64,64))
plt.title('Olivetti Dataset (Mean Training Image)')
plt.show()
#show random selection of images
rnd_idx = np.arange(num_train_images)
np.random.shuffle(rnd_idx)
images = data_train[rnd_idx[0:16],:].reshape(16,64,64)
plot_images(4,4,images)
#compute dense SIFT
    num_kps = np.zeros(num_train_images, dtype=int)
sift = cv2.SIFT()
#orb = cv2.ORB()
for img_idx in range(num_train_images):
gray_img = 255*data_train[img_idx,:]/np.max(data_train[img_idx,:]) #scale
gray_img = gray_img.reshape(64,64).astype(np.uint8) #reshape and cast
dense = cv2.FeatureDetector_create("Dense")
kp = dense.detect(gray_img)
kp, des = sift.compute(gray_img, kp)
#kp, des = orb.compute(gray_img, kp)
#img_kp = cv2.drawKeypoints(gray_img, kp, color=(0,255,0), flags=0)
#cv2.imshow('ORB keypoints', img_kp)
num_kps[img_idx] = len(kp)
#stack descriptors for all training images
if (img_idx == 0):
des_tot = des
else:
des_tot = np.vstack((des_tot, des))
#end for
#cluster images into a dictionary
dictionary_size = 100
kmeans = MiniBatchKMeans(n_clusters = dictionary_size, init = 'k-means++', batch_size = 5000, random_state = 0, verbose=0)
tic = time()
kmeans.fit(des_tot)
toc = time()
kmeans.get_params()
print "K-means objective: %.2f" %kmeans.inertia_
print "elapsed time: %.4f sec" %(toc - tic)
kmeans.cluster_centers_
labels = kmeans.labels_
#PCA plot of kmeans_cluster centers
pca = PCA(n_components=2)
visual_words = pca.fit_transform(kmeans.cluster_centers_)
plt.figure()
plt.scatter(visual_words[:,0], visual_words[:,1], color='b', marker='o', lw = 2.0, label='Olivetti visual words')
plt.title("Visual Words (PCA of cluster centers)")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.grid(True)
plt.legend()
plt.show()
#histogram of labels for each image = term-document matrix
A = np.zeros((dictionary_size,num_train_images))
ii = 0
jj = 0
for img_idx in range(num_train_images):
if img_idx == 0:
A[:,img_idx], bins = np.histogram(labels[0:num_kps[img_idx]], bins=range(dictionary_size+1))
else:
ii = np.int(ii + num_kps[img_idx-1])
jj = np.int(ii + num_kps[img_idx])
A[:,img_idx], bins = np.histogram(labels[ii:jj] , bins=range(dictionary_size+1))
#print str(ii) + ':' + str(jj)
#end for
plt.figure()
plt.spy(A.T, cmap = 'gray')
plt.gca().set_aspect('auto')
    plt.title('Term-document matrix of visual words (training)')
plt.xlabel('dictionary')
plt.ylabel('documents')
plt.show()
#fit LDA topic model based on tf-idf of term-document matrix
num_features = dictionary_size
num_topics = 8 #fixed for LDA
#fit LDA model
print "Fitting LDA model..."
lda_vb = LatentDirichletAllocation(n_topics = num_topics, max_iter=10, learning_method='online', batch_size = 512, random_state=0, n_jobs=1)
tic = time()
lda_vb.fit(A.T) #online VB
toc = time()
print "elapsed time: %.4f sec" %(toc - tic)
print "LDA params"
print lda_vb.get_params()
print "number of EM iter: %d" % lda_vb.n_batch_iter_
print "number of dataset sweeps: %d" % lda_vb.n_iter_
#topic matrix W: K x V
#components[i,j]: topic i, word j
#note: here topics correspond to label clusters
topics = lda_vb.components_
f = plt.figure()
plt.matshow(topics, cmap = 'gray')
plt.gca().set_aspect('auto')
plt.title('learned topic matrix')
plt.ylabel('topics')
plt.xlabel('dictionary')
plt.show()
f.savefig('./figures/topic.png')
#topic proportions matrix: D x K
#note: np.sum(H, axis=1) is not 1
H = lda_vb.transform(A.T)
    f = plt.figure()
    plt.matshow(H, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.title('topic proportions')
    plt.xlabel('topics')
    plt.ylabel('documents')
    f.savefig('./figures/proportions.png')
    plt.show()
#given test data transform test image into tf_idf vector
#show mean test image
plt.figure()
plt.imshow(np.mean(data_test,axis=0).reshape(64,64))
plt.show()
num_test_images = data_test.shape[0]
    num_test_kps = np.zeros(num_test_images, dtype=int)
#compute dense SIFT
sift = cv2.SIFT()
#orb = cv2.ORB()
for img_idx in range(num_test_images):
gray_img = 255*data_test[img_idx,:]/np.max(data_test[img_idx,:]) #scale
gray_img = gray_img.reshape(64,64).astype(np.uint8) #reshape and cast
dense = cv2.FeatureDetector_create("Dense")
kp = dense.detect(gray_img)
kp, des = sift.compute(gray_img, kp)
#kp, des = orb.compute(gray_img, kp)
#img_kp = cv2.drawKeypoints(gray_img, kp, color=(0,255,0), flags=0)
#cv2.imshow('ORB keypoints', img_kp)
num_test_kps[img_idx] = len(kp)
#stack descriptors for all test images
if (img_idx == 0):
des_test_tot = des
else:
des_test_tot = np.vstack((des_test_tot, des))
#end for
#assign des_test_tot to one of kmeans cluster centers
#use 128-dimensional kd-tree to search for nearest neighbors
kdt = KDTree(kmeans.cluster_centers_)
Q = des_test_tot #query
kdt_dist, kdt_idx = kdt.query(Q,k=1) #knn
test_labels = kdt_idx #knn = 1 labels
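    # kdt_idx[i] is the index of the nearest visual word (cluster centre) for
    # test descriptor i; these hard assignments are histogrammed into A_test below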
#form A_test matrix from test_labels
#histogram of labels for each image: term-document matrix
A_test = np.zeros((dictionary_size,num_test_images))
ii = 0
jj = 0
for img_idx in range(num_test_images):
if img_idx == 0:
            A_test[:,img_idx], bins = np.histogram(test_labels[0:num_test_kps[img_idx]], bins=range(dictionary_size+1))
        else:
            ii = np.int(ii + num_test_kps[img_idx-1])
            jj = np.int(ii + num_test_kps[img_idx])
A_test[:,img_idx], bins = np.histogram(test_labels[ii:jj] , bins=range(dictionary_size+1))
#print str(ii) + ':' + str(jj)
#end for
plt.figure()
plt.spy(A_test.T, cmap = 'gray')
plt.gca().set_aspect('auto')
    plt.title('Term-document matrix of visual words (test)')
plt.xlabel('dictionary')
plt.ylabel('documents')
plt.show()
#Use fit transform on A_test for already trained LDA to get the H_test matrix
#topic proportions matrix: D x K
#note: np.sum(H, axis=1) is not 1
H_test = lda_vb.transform(A_test.T)
    f = plt.figure()
    plt.matshow(H_test, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.title('topic proportions (test)')
    plt.xlabel('topics')
    plt.ylabel('documents')
    f.savefig('./figures/proportions_test.png')
    plt.show()
#retrieve H_train document that's closest in cosine similarity for each H_test
#use cosine similarity for image retrieval
    Kxy = cosine_similarity(H_test, H)
    knn_test = np.argmax(Kxy, axis=1)  # most similar training document for each test document
    f = plt.figure()
    plt.matshow(Kxy, cmap = 'gray')
    plt.gca().set_aspect('auto')
    plt.title('Cosine Similarity')
    plt.xlabel('train data')
    plt.ylabel('test data')
    f.savefig('./figures/cosine_similarity.png')
    plt.show()
#display knn images (docId is an image)
rnd_idx = np.arange(num_test_images)
np.random.shuffle(rnd_idx)
images = data_test[rnd_idx[0:16],:].reshape(16,64,64)
images_knn = data_train[knn_test[rnd_idx[0:16]],:].reshape(16,64,64)
plot_images(4,4,images)
plot_images(4,4,images_knn)
| mit |